diff --git "a/1153.jsonl" "b/1153.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1153.jsonl"
@@ -0,0 +1,461 @@
+{"seq_id": "32233043", "text": "\nimport csv\nimport json\n\ncsv_file = open('BirdsBeingDicks.csv', 'r') # Open read only\njson_file = open('BBD.json', 'w') # Write only JSON\ncol_names = (\"created_utc\", \"score\", \"domain\", \"_id\", \"title\", \"author\", \"ups\", \"downs\", \"num_comments\", \"permalink\", \"selftext\", \"link_flair_text\", \"over_18\", \"thumbnail\", \"subreddit_id\", \"edited\", \"link_flair_css_class\", \"author_flair_css_class\", \"is_self\", \"name\", \"url\", \"distinguished\")\nread_csv = csv.DictReader(csv_file, col_names);\n\ni=0\nfor row in read_csv:\n\trow_necessary = {}\n\trow_necessary['time'] = row['created_utc']\n\trow_necessary['score'] = row['score']\n\trow_necessary['domain'] = row['domain']\n\trow_necessary['_id'] = row['_id']\n\trow_necessary['title'] = row['title']\n\trow_necessary['author'] = row['author']\n\trow_necessary['ups'] = row['ups']\n\trow_necessary['downs'] = row['downs']\n\trow_necessary['num_comments'] = row['num_comments']\n\trow_necessary['permalink'] = row['permalink']\n\trow_necessary['name'] = row['name']\n\trow_necessary['url'] = row['url']\n\tif i != 0:\n\t\tjson.dump(row_necessary, json_file, sort_keys=False)\n\t\tjson_file.write(\"\\n\")\n\ti+=1\n\njson_file.close()\njsonnew = open('BBDNEW.json','w');\nwith open('BBD.json') as f:\n\tcontent = f.readlines()\n\tfor line in content:\n\t\tif \"\\(\\)\\[\\]\" in line:\n\t\t\tline = line.replace(\"\\(\",\"\\\\(\")\n\t\t\tline = line.replace(\"\\)\",\"\\\\)\")\n\t\t\tline = line.replace(\"\\[\",\"\\\\[\")\n\t\t\tline = line.replace(\"\\]\",\"\\\\]\")\n\t\tjsonnew.write(line)\nf.close()\n", "sub_path": "importCSVtoJSON.py", "file_name": "importCSVtoJSON.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "csv.DictReader", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "350521340", "text": "import uuid\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Person:\r\n def __init__(self):\r\n self.__name = \"Vitalii\"\r\n self.__last_name = 'Kolobok'\r\n self.__uuid = uuid.uuid4().hex\r\n\r\n def __repr__(self):\r\n return f'{self.__dict__}'\r\n\r\n def creating_new_object(self, **kwargs):\r\n self_copy = deepcopy(self)\r\n for value in kwargs:\r\n setattr(self_copy, value, kwargs[value])\r\n return self_copy\r\n\r\n\r\nperson = Person()\r\n\r\n''' Сopying primary data and adding data '''\r\nperson_objects = person.creating_new_object(firm_car='Renault', model_car='Koleos',\r\n color_car=\"Grey\", government_number='AX2578EM')\r\nprint(person_objects)\r\nprint(person)\r\n", "sub_path": "DZ_7 rev_1.py", "file_name": "DZ_7 rev_1.py", "file_ext": "py", "file_size_in_byte": 739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 9, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 15, "usage_type": "call"}]}
+{"seq_id": "625752915", "text": "from dataclasses import dataclass\nfrom base.common.models.request import BaseRequestModelKeys, SimpleRequestModel\n\n\n@dataclass\nclass GetTPOQueueDataRequestParams(BaseRequestModelKeys):\n COMPANY_ID: str = \"CompanyID\"\n CONTACT_ID: str = \"ContactID\"\n\n\nclass GetTPOQueueDataRequest(SimpleRequestModel):\n def __init__(self, company_id, contact_id, session_id, nonce, pretty_print):\n self.company_id = company_id\n self.contact_id = contact_id\n super().__init__(session_id=session_id, nonce=nonce, pretty_print=pretty_print)\n\n def to_params(self):\n args = super().to_params()\n args[GetTPOQueueDataRequestParams.COMPANY_ID] = self.company_id\n args[GetTPOQueueDataRequestParams.CONTACT_ID] = self.contact_id\n return args\n", "sub_path": "APIs/tpo/requests/get_tpo_queue_data.py", "file_name": "get_tpo_queue_data.py", "file_ext": "py", "file_size_in_byte": 773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "base.common.models.request.BaseRequestModelKeys", "line_number": 6, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 5, "usage_type": "name"}, {"api_name": "base.common.models.request.SimpleRequestModel", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "182082887", "text": "#!/usr/bin/python\n# coding=utf-8\n\n\nfrom django.template import loader,Context\nfrom django.http import HttpResponse\nfrom gallery.models import Blog\nfrom django.shortcuts import render\nfrom django.db.models import Q\n\ndef Posts(request):\n context = {}\n context['posts'] = Blog.objects.all()\n archive_link = time_line(request)\n context.update(archive_link)\n return render(request,'posts.html',context)\n \n\n\ndef blog_detail(request,id=None):\n context = {}\n blog = Blog.objects.get(pk=id)\n context['blog'] = blog\n# context['id'] = id\n return render(request,'blog_detail.html',context)\n\n\ndef search(request):\n context = {}\n key = request.GET.get('search','')\n context['key'] = key\n context['blogs'] = Blog.objects.filter(title__icontains=key).order_by('-timestamp')\n return render(request,'search.html',context)\n\ndef time_line(request):\n context = {}\n# all_time_line = []\n blogs_list = []\n# blogs = Blog.objects.all()\n blogs= Blog.objects.values('id','title', 'timestamp').order_by('timestamp')\n# for blog in blogs:\n# if blog.timestamp not in all_time_line:\n# all_time_line.append(blog.timestamp)\n# context['all_time_line'] = all_time_line\n dates = set([str(i['timestamp'].year)+str(i['timestamp'].month) for i in blogs])\n for i in dates:\n dic = {}\n b_info = []\n count = 0\n dic['year'] = i[:4]\n dic['month'] = i[4:]\n for obj in blogs:\n if str(obj['timestamp'].year) + str(obj['timestamp'].month) == i:\n dic_ = {}\n dic_['blog'] = obj\n b_info.append(dic_)\n count += 1\n dic['count'] = count\n dic['b_info'] = b_info\n blogs_list.append(dic)\n \n context['dates'] = blogs_list\n# context['blogs'] = blogs\n return context\n # return render(request,'posts.html',context)\n \ndef archive(request):\n context = {}\n post = []\n year = request.GET.get('year','')\n month = request.GET.get('month','')\n blogs = Blog.objects.filter(Q(timestamp__month = month), Q(timestamp__year = year))\n context['posts'] = blogs\n archive_link = time_line(request)\n context.update(archive_link)\n \n return render(request,'posts.html',context)\n", "sub_path": "Quentin/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gallery.models.Blog.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 13, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects.get", "line_number": 22, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects.values", "line_number": 40, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"gallery.models.Blog", "line_number": 40, "usage_type": "name"}, {"api_name": "gallery.models.Blog.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "gallery.models.Blog.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "gallery.models.Blog", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "279629574", "text": "import codecs\nimport os\ntry: # for pip >= 10\n from pip._internal.req import parse_requirements\n from pip._internal.download import PipSession\n from pip._internal.index import PackageFinder\nexcept ImportError: # for pip <= 9.0.3\n from pip.download import PipSession\n from pip.index import PackageFinder\n from pip.req import parse_requirements\nfrom setuptools import find_packages, setup\n\nroot_dir = os.path.abspath(os.path.dirname(__file__))\nrequirements_path = os.path.join(root_dir, 'requirements', 'base.txt')\n\nsession = PipSession()\nfinder = PackageFinder([], [], session=session)\nrequirements = parse_requirements(requirements_path, finder, session=session)\ninstall_requires = [r.name for r in requirements]\n\nversion = '2.2.3' # Don't forget to update docs/CHANGELOG.rst if you increment the version\n\nwith codecs.open('README.rst', 'r', 'utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"sbo-sphinx\",\n version=version,\n author=\"Jeremy Bowman\",\n author_email=\"jbowman@safaribooksonline.com\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Software Development :: Documentation',\n ],\n description=\"Sphinx configuration and libraries for Safari Books Online documentation\",\n long_description=long_description,\n url='http://github.com/safarijv/sbo-sphinx',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=install_requires,\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pip.download.PipSession", "line_number": 16, "usage_type": "call"}, {"api_name": "pip.index.PackageFinder", "line_number": 17, "usage_type": "call"}, {"api_name": "pip.req.parse_requirements", "line_number": 18, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 23, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 26, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "67977184", "text": "# -*- coding: utf-8 -*-\r\n\r\n# Define your item pipelines here\r\n#\r\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\r\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\r\n\r\n# import datetime\r\n# import redis\r\n# import json\r\n# from scrapy import signals, Request\r\n# from scrapy.exporters import JsonItemExporter\r\n# from scrapy.pipelines.images import ImagesPipeline\r\n# from scrapy.exceptions import DropItem\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nfrom tutorial.items import HuxiuItem\r\nfrom tutorial.model import db_connect, create_news_table, session_scope\r\nfrom tutorial.model.huxiu_model import HuXiuModel\r\nimport logging\r\nlog = logging.getLogger(__name__)\r\n\r\nclass HuxiuPipeline(object):\r\n def __init__(self):\r\n engine = db_connect()\r\n create_news_table(engine)\r\n self.Session = sessionmaker(bind=engine)\r\n\r\n def process_item(self, item, spider):\r\n if isinstance(item, HuxiuItem):\r\n link = item[\"link\"].encode(\"utf-8\")\r\n session = self.Session()\r\n obj = session.query(HuXiuModel).filter(HuXiuModel.link==link).first()\r\n if obj:\r\n if \"published\" in item:\r\n obj.published = item[\"published\"].encode(\"utf-8\")\r\n if \"desc\" in item:\r\n obj.desc = item[\"desc\"].encode(\"utf-8\")\r\n session.add(obj)\r\n session.commit()\r\n else:\r\n published = item[\"published\"].encode(\"utf-8\") if \"published\" in item else \"\"\r\n desc = item[\"desc\"].encode(\"utf-8\") if \"desc\" in item else \"\"\r\n obj = HuXiuModel(\r\n link=link,\r\n title=item[\"title\"].encode(\"utf-8\"),\r\n desc=desc,\r\n published=published,\r\n )\r\n session.add(obj)\r\n session.commit()\r\n # with session_scope(self.Session) as db:\r\n # db.add(obj)\r\n log.info(item)\r\n return item\r\n\r\n def open_spider(self, spider):\r\n \"\"\"This method is called when the spider is opened.\"\"\"\r\n pass\r\n\r\n def close_spider(self, spider):\r\n pass\r\n", "sub_path": "python/scrapy-spider/tutorial/pipeline/huxiu_pipe.py", "file_name": "huxiu_pipe.py", "file_ext": "py", "file_size_in_byte": 2204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "tutorial.model.db_connect", "line_number": 25, "usage_type": "call"}, {"api_name": "tutorial.model.create_news_table", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 27, "usage_type": "call"}, {"api_name": "tutorial.items.HuxiuItem", "line_number": 30, "usage_type": "argument"}, {"api_name": "tutorial.model.huxiu_model.HuXiuModel", "line_number": 33, "usage_type": "argument"}, {"api_name": "tutorial.model.huxiu_model.HuXiuModel.link", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tutorial.model.huxiu_model.HuXiuModel", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "225534113", "text": "import os\r\nimport argparse\r\nimport numpy as np\r\nimport pandas as pd\r\nimport nibabel as nib\r\nfrom ukbb_cardiac.common.cardiac_utils import get_frames\r\nfrom ukbb_cardiac.common.image_utils import np_categorical_dice\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--output_csv', metavar='csv_name', default='DM_table.csv', required=True)\r\n args = parser.parse_args()\r\n\r\n print('Creating accuracy spreadsheet file ...')\r\n\r\n if os.path.exists(args.output_csv):\r\n os.remove(args.output_csv)\r\n \r\n # Record ED ES frames to csv\r\n init = {'Data': [],\r\n 'EDLV': [],\r\n 'EDLVM': [],\r\n 'EDRV': [],\r\n 'ESLV': [],\r\n 'ESLVM': [],\r\n 'ESRV': [],\r\n }\r\n\r\n df = pd.DataFrame(init)\r\n\r\n root = './demo_image'\r\n folder_list = sorted(os.listdir(root))\r\n\r\n for folder in folder_list:\r\n folder_dir = os.path.join(root, folder)\r\n if os.path.exists('{0}/{1}_seg_sa_ED.nii.gz'.format(folder_dir, folder) and ('{0}/{1}_seg_sa_ES.nii.gz'.format(folder_dir, folder))\r\n and ('{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder))):\r\n \r\n seg_sa_ED = '{0}/{1}_seg_sa_ED.nii.gz'.format(folder_dir, folder)\r\n seg_sa_ES = '{0}/{1}_seg_sa_ES.nii.gz'.format(folder_dir, folder)\r\n seg_sa_ground_truth = '{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder)\r\n ##seg_sa_ED ='{0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder) # To see Dice metric between same segmentations is 1\r\n \r\n seg_gt = nib.load(seg_sa_ground_truth).get_fdata()\r\n \r\n fr = get_frames(seg_gt, 'sa')\r\n seg_ED_gt = seg_gt[:, :, :, fr['ED']] \r\n seg_ES_gt = seg_gt[:, :, :, fr['ES']] \r\n \r\n dice_arr = np.zeros(6)\r\n ind = 0\r\n \r\n frames = ['ED','ES']\r\n segm = ['LV','LV Myocardium','RV']\r\n for fr in frames:\r\n print('\\nFor image {0}, Comparison between: {1} \\n'.format(folder, fr))\r\n\r\n seg_model = nib.load(seg_sa_ED).get_fdata() if fr == 'ED' else nib.load(seg_sa_ES).get_fdata()\r\n ##if fr == 'ED' : seg_model = seg_model[:,:,:,0] # To see Dice metric between same segmentations is 1\r\n \r\n \r\n for i in range(1,4): # Loop for all segmentations\r\n print('Calculate Dice metric for ',segm[i - 1])\r\n \r\n total_seg_ED = np.sum(seg_ED_gt == i, axis=(0, 1, 2))\r\n print('Seg num (', segm[i-1],') in ground truth ED: ',np.max(total_seg_ED))\r\n total_seg_ES = np.sum(seg_ES_gt == i, axis=(0, 1, 2))\r\n print('Seg num (', segm[i-1],') in ground truth ES: ',np.max(total_seg_ES))\r\n\r\n total_seg = np.sum(seg_model == i, axis=(0, 1, 2))\r\n print('Seg num in model: ', np.max(total_seg))\r\n \r\n #denom = seg_ED_gt.shape[0]* seg_ED_gt.shape[1]* seg_ED_gt.shape[2]\r\n \r\n if fr == 'ED':\r\n dice_metric = np_categorical_dice(seg_model, seg_ED_gt, i) if (total_seg + total_seg_ED > 0) else 0 \r\n else:\r\n dice_metric = np_categorical_dice(seg_model, seg_ES_gt, i) if (total_seg + total_seg_ES > 0) else 0\r\n \r\n print(\"Dice metric for {0}: %\".format(fr) , dice_metric * 100,'\\n')\r\n \r\n dice_arr[ind] = dice_metric * 100\r\n ind += 1\r\n print('{0} finished'.format(folder)) \r\n\r\n frames_dict = {'Data': [folder],\r\n 'EDLV': [dice_arr[0]],\r\n 'EDLVM': [dice_arr[1]],\r\n 'EDRV': [dice_arr[2]],\r\n 'ESLV': [dice_arr[3]],\r\n 'ESLVM': [dice_arr[4]],\r\n 'ESRV': [dice_arr[5]],\r\n }\r\n df1 = pd.DataFrame(frames_dict)\r\n df = df.append(df1, ignore_index = True)\r\n \r\n else:\r\n print('Error! 
Can not find one of the expected files: {0}/{1}_seg_sa_ED.nii.gz or {0}/{1}_sa_gt.nii.gz'.format(folder_dir, folder))\r\n\r\n df.to_csv(args.output_csv, index = False)", "sub_path": "dice_calculator.py", "file_name": "dice_calculator.py", "file_ext": "py", "file_size_in_byte": 4422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "nibabel.load", "line_number": 44, "usage_type": "call"}, {"api_name": "ukbb_cardiac.common.cardiac_utils.get_frames", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 71, "usage_type": "call"}, {"api_name": "ukbb_cardiac.common.image_utils.np_categorical_dice", "line_number": 76, "usage_type": "call"}, {"api_name": "ukbb_cardiac.common.image_utils.np_categorical_dice", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "322192585", "text": "from scrapy.spiders import SitemapSpider, CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy import Request\nimport json\n\n\nfrom myproject.items import RestaurantJson\n\nclass TabelogAllSpider(SitemapSpider):\n name = \"tabelog_all\"\n allowed_domains = [\"tabelog.com\"]\n # sitemap_urls = (\n # 'https://tabelog.com/sitemap.xml',\n # )\n #\n # sitemap_rules = [\n # (r'/sitemap_pc_area1_rstlst_\\d+.xml.gz$', 'parse_search_result'),\n # ]\n\n sitemap_rules = (\n (\"https://tabelog.com/sitemap_pc_area1_rstlst_1.xml.gz\", 'parse_search_result'),\n (\"https://tabelog.com/sitemap_pc_area1_rstlst_2.xml.gz\", 'parse_search_result'),\n )\n\n def parse_search_result(self, response):\n \"\"\"\n 地域ページからレストランページへのリンク\n https://tabelog.com/hokkaido/A0104/rstLst/cond04-00-03/RC999909/\n \"\"\"\n\n restaurant_pages = response.css('a.list-rst__rst-name-target cpy-rst-name').extract()\n if restaurant_pages is not None:\n for page in restaurant_pages:\n yield Request(response.urljoin(page), callback=self.parse_restaurant)\n\n def parse_restaurant(self, response):\n\n \"\"\"\n レストランページからJSON-LD\n :param response:\n :return:\n \"\"\"\n\n restaurant_json = json.loads(response.css('script[type=\"application/ld+json\"]').xpath('string()').extract_first())\n\n item = RestaurantJson(\n keyword = restaurant_json['name'],\n target = restaurant_json['@id']\n )\n\n yield item\n\n", "sub_path": "python/scrapy/myproject/myproject/spiders/tabelog_all.py", "file_name": "tabelog_all.py", "file_ext": "py", "file_size_in_byte": 1602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scrapy.spiders.SitemapSpider", "line_number": 9, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "myproject.items.RestaurantJson", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "186203042", "text": "\"\"\"\n.. module:: clustermixin\n :platform: Darwin, Linux, Unix, Windows\n :synopsis: Contains a ClusterMixIn object to use for working with the nodes of a cluster\n\n.. moduleauthor:: Myron Walker \n\"\"\"\n\n__author__ = \"Myron Walker\"\n__copyright__ = \"Copyright 2020, Myron W Walker\"\n__credits__ = []\n__version__ = \"1.0.0\"\n__maintainer__ = \"Myron Walker\"\n__email__ = \"myron.walker@gmail.com\"\n__status__ = \"Development\" # Prototype, Development or Production\n__license__ = \"MIT\"\n\nfrom typing import Dict, List, Tuple\n\nfrom akit.mixins.integration import IntegrationMixIn\n\nclass ClusterMixIn(IntegrationMixIn):\n \"\"\"\n This is a mock playback device.\n \"\"\"\n\n pathbase = \"/clusters\"\n\n def __init__(self, *args, role=None, **kwargs):\n \"\"\"\n The default contructor for an :class:`IntegrationMixIn`.\n \"\"\"\n super(ClusterMixIn, self).__init__(*args, role=role, **kwargs)\n\n if self.pathbase is None:\n raise ValueError(\"The 'pathbase' class member variable must be set to a unique name for each integration class type.\")\n\n self.context.insert(self.pathbase , self)\n return\n\n @classmethod\n def attach_to_environment(cls, constaints: Dict={}):\n \"\"\"\n This API is called so that the IntegrationMixIn can process configuration information. The :class:`IntegrationMixIn`\n will verify that it has a valid environment and configuration to run in.\n\n :raises :class:`akit.exceptions.AKitMissingConfigError`, :class:`akit.exceptions.AKitInvalidConfigError`:\n \"\"\"\n return\n\n @classmethod\n def collect_resources(cls):\n \"\"\"\n This API is called so the `IntegrationMixIn` can connect with a resource management\n system and gain access to the resources required for the automation run.\n\n :raises :class:`akit.exceptions.AKitResourceError`:\n \"\"\"\n\n return\n\n @classmethod\n def diagnostic(cls, diag_level: int, diag_folder: str):\n \"\"\"\n The API is called by the :class:`akit.sequencer.Sequencer` object when the automation sequencer is\n building out a diagnostic package at a diagnostic point in the automation sequence. 
Example diagnostic\n points are:\n\n * pre-run\n * post-run\n\n Each diagnostic package has its own storage location so derived :class:`akit.scope.ScopeMixIn` objects\n can simply write to their specified output folder.\n\n :param diag_level: The maximum diagnostic level to run dianostics for.\n :param diag_folder: The output folder path where the diagnostic information should be written.\n \"\"\"\n\n return\n\n @classmethod\n def establish_connectivity(cls) -> Tuple[List[str], Dict]:\n \"\"\"\n This API is called so that this subclass of the `IntegrationMixIn` can establish connectivity with any\n compute or storage resources.\n\n :raises :class:`akit.exceptins.AKitInitialConnectivityError`:\n \"\"\"\n\n return\n\n @classmethod\n def establish_presence(cls) -> Tuple[List[str], Dict]:\n \"\"\"\n This API is called so the `IntegrationMixIn` can establish presence with any compute or storage\n resources.\n\n :returns: A tuple with a list of error messages for failed connections and dict of connectivity\n reports for devices devices based on the coordinator.\n \"\"\"\n return", "sub_path": "packages/akit/integration/cluster/clustermixin.py", "file_name": "clustermixin.py", "file_ext": "py", "file_size_in_byte": 3516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "akit.mixins.integration.IntegrationMixIn", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 93, "usage_type": "name"}]}
+{"seq_id": "68642674", "text": "#! python3\r\n# downloadXkcd.py - Dowloads every single XKCD comic\r\n\r\nimport requests, os, bs4\r\n\r\nurl = 'http://xkcd.com' # starting url\r\nos.chdir('C:\\\\Users\\\\Mack W\\\\Documents\\\\Python\\\\' +\r\n 'automateTheBoringStuffWithPython\\\\' +\r\n 'Chapter 11 Web Scraping\\\\XKCD Comics')\r\nwhile not url.endswith('#'):\r\n # Download the page\r\n print('Page: ', end='')\r\n print(url)\r\n res = requests.get(url)\r\n res.raise_for_status()\r\n soup = bs4.BeautifulSoup(res.text)\r\n \r\n # Find the URL of the comic image\r\n comicElem = soup.select('#comic img')\r\n if comicElem == []:\r\n print('Could not find comic image.')\r\n else:\r\n comicUrl = 'http:' + comicElem[0].get('src')\r\n \r\n # Download the image\r\n print('Image: ', end='')\r\n print(comicUrl)\r\n res = requests.get(comicUrl)\r\n res.raise_for_status()\r\n \r\n # Save the image\r\n imageName = os.path.basename(comicUrl)\r\n imageFile = open(imageName, 'wb')\r\n for chunk in res.iter_content(100000):\r\n imageFile.write(chunk)\r\n imageFile.close()\r\n \r\n # Get the Prev button's url\r\n prevLink = soup.select('a[rel=\"prev\"]')[0]\r\n url = 'http://xkcd.com' + prevLink.get('href')\r\n\r\nprint('Done.')\r\n", "sub_path": "Command Line Programs/downloadXkcd.py", "file_name": "downloadXkcd.py", "file_ext": "py", "file_size_in_byte": 1229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.chdir", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}]}
+{"seq_id": "377362762", "text": "import kivy\nimport sqlite3\nimport json\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.spinner import Spinner\nfrom kivy.uix.widget import Widget\n#from.kivy.properties import ObjectProperty\n\n\nreadedNote = None\npriorToSearch=10\nnowEditing=10\nseenNotes = list()\nlastId = 0\nindex = 0\n\n#Builder.load_file('style.kv')\n\nclass MyGrid(Widget):\n pass\n def __init__(self, **kwargs):\n super(MyGrid, self).__init__(**kwargs)\n\n self.addButton.bind(on_press=self.add)\n self.buttonDid.bind(on_press=self.didIt)\n self.buttonLater.bind(on_press=self.doItLater)\n\n self.aboutBt.bind(on_press=self.about)\n self.showAllBt.bind(on_press=self.showAll)\n\n configFile = open(\"config.json\", \"r\")\n config = json.load(configFile)\n\n self.addButton.text = config.get(\"addButton\", \"+\")\n self.buttonDid.text = config.get(\"doneButton\", \"DONE\")\n self.buttonLater.text = config.get(\"laterButton\", \"LATER\")\n self.showAllBt.text = config.get(\"showAllButton\", \"SHOW ALL\")\n self.aboutBt.text = config.get(\"aboutButton\", \"ABOUT\")\n self.settingsBt.text = config.get(\"settingsButton\", \"SETTINGS\")\n\n self.findNew(self)\n\n\n def about(self, obj):\n global index\n self.noteLabel.text = \"By: Marek Maskarinec \\n version: 0.1\"\n index = 0\n\n def showAll(self, obj):\n global index\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards\")\n cards = c.fetchall()\n print(cards)\n if cards is not []:\n for i in range(len(cards)):\n self.noteLabel.text += cards[i][0] + \"\\n\" + str(cards[i][1]) + \"\\n\" + \"--------\" + \"\\n\"\n else:\n self.noteLabel.text = \"No notes\"\n conn.close()\n index = 0\n\n def add(self, obj):\n global index\n noteText = self.addTextBox.text\n priority = int(self.priorityBox.text )\n print(priority)\n print(noteText)\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n #c.execute(\"\"\"CREATE TABLE cards\n # (note text, priority number)\"\"\")\n c.execute(\"INSERT INTO cards VALUES (?, ?)\", (noteText, priority))\n c.execute(\"SELECT * FROM cards ORDER BY priority DESC\")\n conn.commit()\n conn.close()\n index = 0\n #nowEditing = nowEditing - 1\n\n def didIt(self, obj):\n global index\n global priorToSearch\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards\")\n cards = c.fetchall()\n if len(cards) > 0 and index < len(cards):\n c.execute('DELETE FROM cards WHERE note=?', (cards[index][0],))\n conn.commit()\n conn.close()\n if len(cards) > 0:\n self.findNew(obj)\n #index += 1\n\n def doItLater(self, obj):\n global index\n self.findNew(obj)\n index += 1\n\n def findNew(self, obj):\n global index\n print(index)\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards ORDER BY priority DESC\")\n #c.execute(\"SELECT * FROM cards\")\n cards = c.fetchall()\n print(cards)\n conn.close()\n if index >= len(cards):\n index = 0\n\n if len(cards) > 0:\n self.noteLabel.text = cards[index][0]\n else:\n self.noteLabel.text = \"no notes\"\n \"\"\"global priorToSearch\n global seenNotes\n global lastId\n cycleNumber = 0\n while True:\n conn = sqlite3.connect('cards.db')\n c = conn.cursor()\n c.execute(\"SELECT * FROM cards WHERE priority=?\", (priorToSearch,))\n recordRow = c.fetchone()\n if recordRow is not None:\n c.execute(\"SELECT rowid, * FROM cards 
WHERE note=?\", (recordRow[0],))\n lastId = c.fetchone\n else:\n self.noteLabel.text = \"Create note first\"\n\n if recordRow is None:\n self.noteLabel.text = \"\"\n priorToSearch = priorToSearch - 1\n if priorToSearch < 1:\n if cycleNumber == 1:\n break\n else:\n priorToSearch = 10\n cycleNumber = cycleNumber + 1\n #seenNotes = list()\n else:\n print(seenNotes)\n print(lastId)\n if lastId not in seenNotes:\n print(recordRow)\n self.noteLabel.text = recordRow[0]\n #priorityLabel.config(text=recordRow[1])\n seenNotes.append(lastId)\n break\n else:\n priorToSearch = priorToSearch - 1\n\n conn.commit()\n conn.close()\n priorToSearch = priorToSearch -1\"\"\"\n\n\nclass Window(App):\n\n def build(self):\n return MyGrid()\n\nwindow = Window()\n\nwindow.run()\n", "sub_path": "taskDeckAndoid.py", "file_name": "taskDeckAndoid.py", "file_ext": "py", "file_size_in_byte": 5191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "kivy.uix.widget.Widget", "line_number": 24, "usage_type": "name"}, {"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 75, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 109, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 165, "usage_type": "name"}]}
+{"seq_id": "115505475", "text": "import os\nimport logging\nimport logging.handlers\nimport errno\n\n\ndef mkdir_p(path):\n \"\"\"http://stackoverflow.com/a/600612/190597 (tzot)\"\"\"\n try:\n os.makedirs(path, exist_ok=True) # Python>3.2\n except TypeError:\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise\n\n\nclass LinkFileHandler(logging.handlers.RotatingFileHandler):\n def __init__(self, filename, mode='a', encoding=None, delay=0, maxBytes=10485760, backupCount=2):\n mkdir_p(os.path.dirname(filename))\n logging.FileHandler.__init__(self, filename, mode, encoding, delay)\n\n\ndef Logger():\n # Sets log file path.\n log_file = os.path.join(os.path.dirname(__file__), 'logs/datil_link.log')\n \n # Return a logger with the specified name.\n mylogger = logging.getLogger(\"MyLogger\")\n handler = LinkFileHandler(log_file, maxBytes=10485760, backupCount=2)\n handler = logging.handlers.RotatingFileHandler(log_file)\n\n # Sets the threshold for this logger to lvl. Logging messages which are less severe than lvl will be ignored.\n mylogger.setLevel(logging.DEBUG)\n \n # Sets format of record in log file\n formatter = logging.Formatter('%(asctime)s - %(module)-10s - %(levelname)-8s %(message)s', '%d-%m-%Y %H:%M:%S')\n handler.setFormatter(formatter)\n \n # Adds the specified handler to logger \"MyLogger\"\n mylogger.addHandler(handler)\n\n return mylogger\n\n\nmylogger = Logger()\n", "sub_path": "logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.makedirs", "line_number": 10, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.handlers", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.FileHandler.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "108828403", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom hamcrest import (assert_that, equal_to, is_, is_not, contains,\n has_entries, starts_with, has_length) # noqa: H310\nimport requests\nfrom stepler import base\nfrom stepler.third_party import waiter\n\nfrom vapor import settings\n\n\nclass LBaaSSteps(base.BaseSteps):\n \"\"\"LBaaS steps.\"\"\"\n\n def _check_presence(self, objs, list_method, expected_presence, timeout=0):\n def _check_presence():\n all_objs = list_method()\n matcher = is_\n if not expected_presence:\n matcher = is_not\n return waiter.expect_that(\n all_objs,\n matcher(\n contains(*[has_entries(id=obj['id']) for obj in objs])))\n\n waiter.wait(_check_presence, timeout_seconds=timeout)\n\n def create_lb(self, name, subnet, **kwargs):\n \"\"\"Create loadbalancer and wait it became to online.\"\"\"\n loadbalancer = self._client.lbaas_loadbalancers.create(\n name=name, vip_subnet_id=subnet['id'], **kwargs)\n\n loadbalancer = self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return loadbalancer\n\n def delete_lbs(self, loadbalancers):\n \"\"\"Delete loadbalancer and wait for deletion to be completed.\"\"\"\n for loadbalancer in loadbalancers:\n self._client.lbaas_loadbalancers.delete(loadbalancer['id'])\n\n self.check_lbs_presence(\n loadbalancers, timeout=settings.LBAAS_DELETE_TIMEOUT)\n\n def check_lb_provisioning_status(self,\n loadbalancer,\n expected_status=\"ACTIVE\",\n timeout=0):\n \"\"\"Check that loadbalancer has expected provisioning status.\"\"\"\n\n def _check_status():\n lb = self._client.lbaas_loadbalancers.get(loadbalancer['id'])\n waiter.expect_that(lb['provisioning_status'],\n is_not(starts_with('PENDING_')))\n return lb\n\n loadbalancer = waiter.wait(_check_status, timeout_seconds=timeout)\n assert_that(loadbalancer['provisioning_status'],\n equal_to(expected_status))\n return loadbalancer\n\n def check_lbs_presence(self,\n loadbalancers,\n expected_presence=True,\n timeout=0):\n \"\"\"Check that loadbalancer is present (or not).\"\"\"\n self._check_presence(\n loadbalancers,\n self._client.lbaas_loadbalancers.list,\n expected_presence,\n timeout=timeout)\n\n def cleanup_lbs(self, names):\n \"\"\"Remove all loadbalancers by names list.\"\"\"\n loadbalancers = []\n for name in names:\n for loadbalancer in self._client.lbaas_loadbalancers.find_all(\n name=name):\n loadbalancers.append(loadbalancer)\n self._client.lbaas_loadbalancers.delete(loadbalancer['id'])\n\n self.check_lbs_presence(\n loadbalancers,\n expected_presence=False,\n timeout=settings.LBAAS_DELETE_TIMEOUT)\n\n def create_listener(self, name, loadbalancer, protocol, protocol_port,\n **kwargs):\n \"\"\"Create LBaaS listener.\"\"\"\n listener = self._client.lbaas_listeners.create(\n name=name,\n loadbalancer_id=loadbalancer['id'],\n protocol=protocol,\n protocol_port=protocol_port,\n **kwargs)\n\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return listener\n\n def 
delete_listener(self, listener):\n \"\"\"Delete LBaaS listener.\"\"\"\n listener = self._client.lbaas_listeners.get(listener['id'])\n loadbalancers = listener['loadbalancers']\n self._client.lbaas_listeners.delete(listener['id'])\n for lb in loadbalancers:\n self.check_lb_provisioning_status(\n lb, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def cleanup_listeners(self, names):\n \"\"\"Remove all listeners by names list.\"\"\"\n for name in names:\n for listener in self._client.lbaas_listeners.find_all(name=name):\n self.delete_listener(listener)\n\n def create_pool(self, name, listener, protocol, lb_algorithm, **kwargs):\n \"\"\"Create LBaaS pool.\"\"\"\n pool = self._client.lbaas_pools.create(\n name=name,\n listener_id=listener['id'],\n protocol=protocol,\n lb_algorithm=lb_algorithm,\n **kwargs)\n\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return pool\n\n def delete_pool(self, pool):\n \"\"\"Create LBaaS pool.\"\"\"\n self._client.lbaas_pools.delete(pool['id'])\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def cleanup_pools(self, names):\n \"\"\"Remove all pools by names list.\"\"\"\n loadbalancers = []\n for name in names:\n for pool in self._client.lbaas_pools.find_all(name=name):\n self._client.lbaas_pools.delete(pool['id'])\n loadbalancers.extend(pool['loadbalancers'])\n\n for loadbalancer in loadbalancers:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def create_member(self, pool, address, protocol_port, subnet, **kwargs):\n \"\"\"Create LBaaS pool member.\"\"\"\n member = pool.members.create(\n address=address,\n protocol_port=protocol_port,\n subnet_id=subnet['id'],\n **kwargs)\n\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n return member\n\n def delete_member(self, pool, member):\n \"\"\"Delete LBaaS pool member.\"\"\"\n pool.members.delete(member['id'])\n\n for loadbalancer in pool['loadbalancers']:\n self.check_lb_provisioning_status(\n loadbalancer, timeout=settings.LBAAS_ONLINE_TIMEOUT)\n\n def check_balancing(self, ip, port, expected_count):\n \"\"\"Check that responses contains `expected_counts` variants.\"\"\"\n responses = set()\n for _ in range(expected_count * 3):\n r = requests.get(\"http://{}:{}/\".format(ip, port))\n r.raise_for_status()\n responses.add(r.text)\n assert_that(responses, has_length(expected_count))\n", "sub_path": "plugin_test/vapor/vapor/helpers/lbaas.py", "file_name": "lbaas.py", "file_ext": "py", "file_size_in_byte": 7280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "stepler.base.BaseSteps", "line_number": 22, "usage_type": "attribute"}, {"api_name": "stepler.base", "line_number": 22, "usage_type": "name"}, {"api_name": "hamcrest.is_", "line_number": 28, "usage_type": "name"}, {"api_name": "hamcrest.is_not", "line_number": 30, "usage_type": "name"}, {"api_name": "stepler.third_party.waiter.expect_that", "line_number": 31, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 31, "usage_type": "name"}, {"api_name": "hamcrest.contains", "line_number": 34, "usage_type": "call"}, {"api_name": "hamcrest.has_entries", "line_number": 34, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter.wait", "line_number": 
36, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 36, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_DELETE_TIMEOUT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 54, "usage_type": "name"}, {"api_name": "stepler.third_party.waiter.expect_that", "line_number": 64, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 64, "usage_type": "name"}, {"api_name": "hamcrest.is_not", "line_number": 65, "usage_type": "call"}, {"api_name": "hamcrest.starts_with", "line_number": 65, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter.wait", "line_number": 68, "usage_type": "call"}, {"api_name": "stepler.third_party.waiter", "line_number": 68, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 69, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 70, "usage_type": "call"}, {"api_name": "vapor.settings.LBAAS_DELETE_TIMEOUT", "line_number": 96, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 96, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 109, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 120, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 120, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 139, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 139, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 148, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 148, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 160, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 160, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 172, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 172, "usage_type": "name"}, {"api_name": "vapor.settings.LBAAS_ONLINE_TIMEOUT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "vapor.settings", "line_number": 182, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 188, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 191, "usage_type": "call"}, {"api_name": "hamcrest.has_length", "line_number": 191, "usage_type": "call"}]}
+{"seq_id": "441899619", "text": "import numpy as np\nimport cv2\nimport math\n\n\ndef get_primary_background_color(img):\n \"\"\"\n Returns the primarily used color in the images, which is assumed to be the background color.\n\n :param img: this is the image\n :returns the primary hue tone.\n :rtype int\n \"\"\"\n\n hist = cv2.calcHist([img], [0], None, [256], [0, 256])\n # get most occurring color\n background_color = hist.argmax(axis=0)\n\n return background_color\n\n\ndef get_background_spot(img, background_color, spot_size=200):\n \"\"\"\n Returns a position in the image, which is the most similar spot to the background color.\n\n :param img: this is the image\n :param background_color: this is the background color\n :param spot_size: the size of the searched spot.\n The higher the value, the slower the search and up to a certain size more stable\n :returns x, y coordinate of the background spot.\n :rtype tuple\n \"\"\"\n\n spot_template = np.zeros((spot_size, spot_size, 3), np.uint8)\n spot_template[:, :, 0] = background_color\n spot_template[:, :, 1] = background_color\n spot_template[:, :, 2] = background_color\n\n # find big background spot\n method = cv2.TM_SQDIFF_NORMED\n result = cv2.matchTemplate(spot_template, img, method)\n # We want the minimum squared difference\n mn, _, location, _ = cv2.minMaxLoc(result)\n\n return location\n\n\ndef generate_binary_background_image(img, background_color, threshold=25):\n \"\"\"\n Returns a binary image, which where the background color with some threshold is separated from the rest.\n\n :param img: this is the image\n :param background_color: this is the background color\n :param threshold: the threshold around the primary background color, which still should belong to the background.\n :returns: binary image.\n :rtype: array\n \"\"\"\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, mask1 = cv2.threshold(gray, background_color + threshold, 255, 0)\n ret, mask2 = cv2.threshold(gray, background_color - threshold, 255, 0)\n combined = cv2.bitwise_not(cv2.bitwise_and(mask1, mask2))\n\n return combined\n\n\ndef separate_background(binary_background_img, background_location):\n \"\"\"\n Returns a binary image, where the background ist black and the image locations are white.\n\n :param binary_background_img: binary version of the image\n :param background_location: a location (x,y) where there is some background\n :returns: binary image. 
\n :rtype: array \n \"\"\"\n\n im_floodfill = binary_background_img.copy()\n h, w = binary_background_img.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n\n cv2.floodFill(im_floodfill, mask, background_location, 128)\n\n im_floodfill[im_floodfill == 0] = 255\n im_floodfill[im_floodfill == 128] = 0\n\n return im_floodfill\n\n\ndef check_for_features(img, threshold=10):\n \"\"\"\n Returns true or false dependent on the amount of features (corners and edges) which are in the image.\n Used to remove images without content (only background).\n\n :param img: input image\n :param threshold: the necessary amount of features needed to be regarded as image\n :returns: boolean, if image as enough features \n :rtype: bool \n \"\"\"\n\n blur1 = cv2.GaussianBlur(img, (7, 7), 0)\n blur2 = cv2.GaussianBlur(img, (15, 15), 0)\n gradients = blur1 - blur2\n\n pixel_sum = np.sum(gradients[0:img.shape[0]-1, 0:img.shape[1]-1, 0:img.shape[2]-1])\n average = pixel_sum / (img.shape[0] * img.shape[1] * img.shape[2])\n\n return average > threshold\n\n\ndef crop_image_rectangles(img, binary_background_image, min_area=-100, max_dimension_relation=2.5, image_padding=10):\n \"\"\"\n Returns an array of images, which are cut out of the original image.\n The cut is based on the binary background image.\n During this process unrelevant (to small, to monoton, ...) images are sorted out.\n\n :param img: input image\n :param binary_background_image: binary image showing where background and where foreground is.\n :param min_area: the size(area) an image must at least have to be considered as an image.\n :param max_dimension_relation: the maximum relation between the width and the height of an image\n (-> strips are not allowed)\n :param image_padding: the padding with which image is cut out of the original photo.\n :returns: an array of all the images in the scan\n :rtype: array \n \"\"\"\n\n # initialize output images\n cropped_images = []\n\n im2, contours, hierarchy = cv2.findContours(binary_background_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n album_image_height = binary_background_image.shape[0]\n album_image_width = binary_background_image.shape[1]\n \n if min_area < 0:\n min_area = album_image_height * album_image_width / (-min_area)\n\n count_ignored_because_corner_distance = 0\n count_ignored_because_min_area = 0\n count_ignored_because_total_album = 0\n count_ignored_because_dimension_relation = 0\n\n for i in range(len(contours)):\n # the virtual corners correspond to the edges if every point should be in the image\n # the real corners are the contour points which are closest to the edge\n\n virtual_corners = [[album_image_width, album_image_height], [0, 0]]\n real_corners = [[album_image_width, album_image_height], [0, 0]]\n\n for j in range(len(contours[i])):\n if virtual_corners[0][0] > contours[i][j][0][0]:\n virtual_corners[0][0] = contours[i][j][0][0]\n if virtual_corners[0][1] > contours[i][j][0][1]:\n virtual_corners[0][1] = contours[i][j][0][1]\n if virtual_corners[1][0] < contours[i][j][0][0]:\n virtual_corners[1][0] = contours[i][j][0][0]\n if virtual_corners[1][1] < contours[i][j][0][1]:\n virtual_corners[1][1] = contours[i][j][0][1]\n\n if real_corners[0][0] + real_corners[0][1] > contours[i][j][0][0] + contours[i][j][0][1]:\n real_corners[0][0] = contours[i][j][0][0]\n real_corners[0][1] = contours[i][j][0][1]\n\n if real_corners[1][0] + real_corners[1][1] < contours[i][j][0][0] + contours[i][j][0][1]:\n real_corners[1][0] = contours[i][j][0][0]\n real_corners[1][1] = 
contours[i][j][0][1]\n\n # check if virtual corners are near real corners\n max_corner_distance = math.sqrt(album_image_width*album_image_width\n + album_image_height*album_image_height) / 20\n\n corner_distance_topleft = math.sqrt(math.pow(real_corners[1][0] - virtual_corners[1][0], 2)\n + math.pow(real_corners[1][1] - virtual_corners[1][1], 2))\n\n corner_distance_bottomright = math.sqrt(math.pow(real_corners[0][0] - virtual_corners[0][0], 2)\n + math.pow(real_corners[0][1] - virtual_corners[0][1], 2))\n\n if corner_distance_topleft > max_corner_distance or corner_distance_bottomright > max_corner_distance:\n count_ignored_because_corner_distance += 1\n continue\n\n image_width = abs(real_corners[0][0] - real_corners[1][0])\n image_height = abs(real_corners[0][1] - real_corners[1][1])\n image_area = abs(image_width * image_height)\n\n # dont save images that are the whole album image\n if img.shape[0] < image_height * 1.1 and img.shape[1] < image_width * 1.1:\n count_ignored_because_total_album += 1\n continue\n\n # dont save images, that are to small\n if image_area < min_area:\n count_ignored_because_min_area += 1\n continue\n\n # dont save images, that have weird dimensions\n if image_height/image_width > max_dimension_relation or image_width/image_height > max_dimension_relation:\n count_ignored_because_dimension_relation += 1\n continue\n\n # if there is enough space add some padding\n if real_corners[0][1] - image_padding > 0:\n real_corners[0][1] -= image_padding\n if real_corners[0][0] - image_padding > 0:\n real_corners[0][0] -= image_padding\n if real_corners[1][1] + image_padding < img.shape[0]:\n real_corners[1][1] += image_padding\n if real_corners[1][0] + image_padding < img.shape[1]:\n real_corners[1][0] += image_padding\n\n crop = img[real_corners[0][1]:real_corners[1][1],real_corners[0][0]:real_corners[1][0]]\n cropped_images.append(crop)\n\n return cropped_images\n\n\ndef validate_cropped_images(cropped_images, feature_threshold):\n \"\"\"\n Validated the cropped image by checking for feature.\n\n :param feature_threshold: the necessary amount of features needed to be regarded as image\n :param cropped_images: array - An array of cropped images\n :return: An array of validated cropped images\n :rtype array\n \"\"\"\n valid_cropped_images = []\n\n for image in cropped_images:\n enough_features = check_for_features(image, feature_threshold)\n if enough_features:\n valid_cropped_images.append(image)\n\n return valid_cropped_images\n", "sub_path": "imextract/backgroundremover.py", "file_name": "backgroundremover.py", "file_ext": "py", "file_size_in_byte": 9186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.calcHist", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.TM_SQDIFF_NORMED", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.matchTemplate", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.minMaxLoc", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 62, "usage_type": "call"}, 
{"api_name": "cv2.bitwise_and", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 79, "usage_type": "attribute"}, {"api_name": "cv2.floodFill", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 168, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 171, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 171, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 172, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 174, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 174, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 175, "usage_type": "call"}]}
+{"seq_id": "382957105", "text": "# Estimate extrinsic camera parameters\n#\n# camera calibration for distorted images with chess board samples\n#\n# based on: https://github.com/opencv/opencv/blob/master/samples/python/calibrate.py\n#\n# Dylan Campbell \n\nimport os\nimport cv2 as cv\nimport argparse\nimport json\nimport pickle\nimport subprocess\nimport numpy as np\nfrom glob import glob\nfrom pdb import set_trace as st\n\nscenes = ['01', '02', '04', '05', '06', '07', '08', '09', '10', '11']\ncams = ['dev1', 'dev2', 'dev3']\n\ndef get_extrinsic_parameters(args):\n calib_path = os.path.join(args.dataset_dir, 'Calibration')\n for scene in scenes:\n scene_path = os.path.join(calib_path, scene)\n \n cam = 'dev2' # Compute cameras w.r.t dev2\n cam_path = os.path.join(scene_path, cam)\n img_mask = os.path.join(cam_path, 'images', '????.png')\n img_names = glob(img_mask)\n color_param_filename = os.path.join(cam_path, 'ColorIns.txt')\n rgb_ins_params = get_rgb_ins_params(color_param_filename)\n\n cam1 = 'dev1' # Compute cameras w.r.t dev2\n cam_path1 = os.path.join(scene_path, cam1)\n img_mask1 = os.path.join(cam_path1, 'images', '????.png')\n img_names1 = glob(img_mask1)\n cam3 = 'dev3' # Compute cameras w.r.t dev2\n cam_path3 = os.path.join(scene_path, cam3)\n img_mask3 = os.path.join(cam_path3, 'images', '????.png')\n img_names3 = glob(img_mask3)\n\n pattern_size = (4, 3) # Number of inner corners per a chessboard row and column\n pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)\n pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)\n pattern_points *= args.square_size\n\n obj_points = []\n img_points = []\n img_points1 = []\n img_points3 = []\n h, w = cv.imread(img_names[0], cv.IMREAD_GRAYSCALE).shape[:2]\n\n def processImage(fn):\n # print('processing %s... ' % fn)\n img = cv.imread(fn, 0)\n if img is None:\n print(\"Failed to load\", fn)\n return None\n\n # img = cv.flip(img, 1) # Flip LR\n # cv.imwrite(fn, img)\n\n assert w == img.shape[1] and h == img.shape[0], (\"size: %d x %d ... \" % (img.shape[1], img.shape[0]))\n found, corners = cv.findChessboardCorners(img, pattern_size)\n if found:\n term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)\n cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)\n if args.debug_dir:\n vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n cv.drawChessboardCorners(vis, pattern_size, corners, found)\n _, name, _ = splitfn(fn)\n outfile = os.path.join(args.debug_dir, name + '_chess.png')\n cv.imwrite(outfile, vis)\n if not found:\n print('chessboard not found')\n return None\n # print(' %s... 
OK' % fn)\n return (corners.reshape(-1, 2), pattern_points)\n\n threads_num = args.num_threads\n if threads_num <= 1:\n chessboards1 = [processImage(fn) for fn in img_names1]\n chessboards3 = [processImage(fn) for fn in img_names3]\n chessboards = [processImage(fn) for fn in img_names]\n else:\n # print(\"Run with %d threads...\" % threads_num)\n from multiprocessing.dummy import Pool as ThreadPool\n pool = ThreadPool(threads_num)\n chessboards1 = pool.map(processImage, img_names1)\n chessboards3 = pool.map(processImage, img_names3)\n chessboards = pool.map(processImage, img_names)\n\n chessboards = [x for x in chessboards if x is not None]\n chessboards1 = [x for x in chessboards1 if x is not None]\n chessboards3 = [x for x in chessboards3 if x is not None]\n for (corners, pattern_points) in chessboards:\n img_points.append(corners)\n obj_points.append(pattern_points)\n for (corners, pattern_points) in chessboards1:\n img_points1.append(corners)\n for (corners, pattern_points) in chessboards3:\n img_points3.append(corners)\n\n # Calibrate cameras:\n camera_matrix_gt = np.float32(np.array([[rgb_ins_params[\"fx\"], 0.0, rgb_ins_params[\"cx\"]], [0.0, rgb_ins_params[\"fy\"], rgb_ins_params[\"cy\"]], [0.0, 0.0, 1.0]])) # fx and fy \n dist_coefs_gt = np.float32(np.array([0.0, 0.0, 0.0, 0.0]))\n flags=cv.CALIB_USE_INTRINSIC_GUESS + cv.CALIB_FIX_PRINCIPAL_POINT + cv.CALIB_FIX_ASPECT_RATIO + cv.CALIB_ZERO_TANGENT_DIST + cv.CALIB_FIX_K1 + cv.CALIB_FIX_K2 + cv.CALIB_FIX_K3 + cv.CALIB_FIX_K4 + cv.CALIB_FIX_K5 + cv.CALIB_FIX_K6\n rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), camera_matrix_gt, dist_coefs_gt, flags=flags)\n rms1, camera_matrix1, dist_coefs1, rvecs1, tvecs1 = cv.calibrateCamera(obj_points, img_points1, (w, h), camera_matrix_gt, dist_coefs_gt, flags=flags)\n rms3, camera_matrix3, dist_coefs3, rvecs3, tvecs3 = cv.calibrateCamera(obj_points, img_points3, (w, h), camera_matrix_gt, dist_coefs_gt, flags=flags)\n\n # if debug: undistort the image with the calibration\n for fn in img_names if args.debug_dir else []:\n _path, name, _ext = splitfn(fn)\n img_found = os.path.join(args.debug_dir, name + '_chess.png')\n outfile = os.path.join(args.debug_dir, name + '_undistorted.png')\n img = cv.imread(img_found)\n if img is None:\n continue\n h, w = img.shape[:2]\n newcameramtx, roi = cv.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))\n dst = cv.undistort(img, camera_matrix, dist_coefs, None, newcameramtx)\n # crop and save the image\n x, y, w, h = roi\n dst = dst[y:y+h, x:x+w]\n print('Undistorted image written to: %s' % outfile)\n cv.imwrite(outfile, dst)\n\n flags=cv.CALIB_FIX_INTRINSIC\n rms, camera_matrix, dist_coefs, camera_matrix1, dist_coefs1, R21, T21, _, _ = cv.stereoCalibrate(obj_points, img_points, img_points1, camera_matrix, dist_coefs, camera_matrix1, dist_coefs1, (w, h), flags=flags)\n rms, camera_matrix, dist_coefs, camera_matrix3, dist_coefs3, R23, T23, _, _ = cv.stereoCalibrate(obj_points, img_points, img_points3, camera_matrix, dist_coefs, camera_matrix3, dist_coefs3, (w, h), flags=flags)\n\n camera_parameters = {\n \"K\": camera_matrix,\n \"dist_coefs\": dist_coefs,\n \"R\": np.eye(3),\n \"T\": np.zeros((3,1))\n }\n with open(os.path.join(cam_path, 'camera_parameters.pkl'), 'wb') as f:\n pickle.dump(camera_parameters, f)\n with open(os.path.join(cam_path, 'camera_parameters.json'), 'w') as outfile:\n camera_parameters_serialized = {key: value.tolist() for key, value in camera_parameters.items()}\n 
json.dump(camera_parameters_serialized, outfile)\n\n camera_parameters = {\n \"K\": camera_matrix1,\n \"dist_coefs\": dist_coefs1,\n \"R\": R21,\n \"T\": T21\n }\n with open(os.path.join(cam_path1, 'camera_parameters.pkl'), 'wb') as f:\n pickle.dump(camera_parameters, f)\n with open(os.path.join(cam_path1, 'camera_parameters.json'), 'w') as outfile:\n camera_parameters_serialized = {key: value.tolist() for key, value in camera_parameters.items()}\n json.dump(camera_parameters_serialized, outfile)\n\n camera_parameters = {\n \"K\": camera_matrix3,\n \"dist_coefs\": dist_coefs3,\n \"R\": R23,\n \"T\": T23\n }\n with open(os.path.join(cam_path3, 'camera_parameters.pkl'), 'wb') as f:\n pickle.dump(camera_parameters, f)\n with open(os.path.join(cam_path3, 'camera_parameters.json'), 'w') as outfile:\n camera_parameters_serialized = {key: value.tolist() for key, value in camera_parameters.items()}\n json.dump(camera_parameters_serialized, outfile)\n\ndef splitfn(fn):\n path, fn = os.path.split(fn)\n name, ext = os.path.splitext(fn)\n return path, name, ext\n\ndef get_rgb_ins_params(param_file):\n '''\n read the rgb intrinsic parameters file\n :param param_file: path to intrinsic parameters file\n :return:\n rgb_ins_params: a libfreenect2 ColorCameraParams object\n '''\n with open(param_file, 'r') as f:\n rgb_ins_params = [float(line.strip()) for line in f if line]\n\n rgb_camera_params_obj = {\n \"fx\" : rgb_ins_params[0],\n \"fy\" : rgb_ins_params[1],\n \"cx\" : rgb_ins_params[2],\n \"cy\" : rgb_ins_params[3],\n\n \"shift_d\" : rgb_ins_params[4],\n \"shift_m\" : rgb_ins_params[5],\n \"mx_x3y0\" : rgb_ins_params[6],\n \"mx_x0y3\" : rgb_ins_params[7],\n \"mx_x2y1\" : rgb_ins_params[8],\n \"mx_x1y2\" : rgb_ins_params[9],\n \"mx_x2y0\" : rgb_ins_params[10],\n \"mx_x0y2\" : rgb_ins_params[11],\n \"mx_x1y1\" : rgb_ins_params[12],\n \"mx_x1y0\" : rgb_ins_params[13],\n \"mx_x0y1\" : rgb_ins_params[14],\n \"mx_x0y0\" : rgb_ins_params[15],\n\n \"my_x3y0\" : rgb_ins_params[16],\n \"my_x0y3\" : rgb_ins_params[17],\n \"my_x2y1\" : rgb_ins_params[18],\n \"my_x1y2\" : rgb_ins_params[19],\n \"my_x2y0\" : rgb_ins_params[20],\n \"my_x0y2\" : rgb_ins_params[21],\n \"my_x1y1\" : rgb_ins_params[22],\n \"my_x1y0\" : rgb_ins_params[23],\n \"my_x0y1\" : rgb_ins_params[24],\n \"my_x0y0\" : rgb_ins_params[25]\n }\n return rgb_camera_params_obj\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_dir', type=str, default='/home/djcam/Documents/HDD/datasets/ikea/ikea_asm/',\n help='directory of the IKEA assembly dataset')\n parser.add_argument('--square_size', type=float, default=4.0,\n help='calibration chessboard square size (in centimetres)')\n parser.add_argument('--num_threads', type=int, default=4,\n help='number of threads for chessboard function')\n parser.add_argument('--debug_dir', type=str, default='',\n help='path for debug chessboard images')\n args = parser.parse_args()\n\n get_extrinsic_parameters(args)", "sub_path": "human_pose/calibration.py", "file_name": "calibration.py", "file_ext": "py", "file_size_in_byte": 10409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.indices", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.findChessboardCorners", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_COUNT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.cornerSubPix", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.drawChessboardCorners", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 74, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.CALIB_USE_INTRINSIC_GUESS", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_PRINCIPAL_POINT", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_ASPECT_RATIO", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_ZERO_TANGENT_DIST", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K1", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K2", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K3", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K4", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K5", 
"line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.CALIB_FIX_K6", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cv2.calibrateCamera", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.calibrateCamera", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.getOptimalNewCameraMatrix", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.CALIB_FIX_INTRINSIC", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.stereoCalibrate", "line_number": 131, "usage_type": "call"}, {"api_name": "cv2.stereoCalibrate", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 218, "usage_type": "call"}]}
+{"seq_id": "580133782", "text": "# Inspired by http://stackoverflow.com/questions/4160175/detect-tap-with-pyaudio-from-live-mic\n# Adjusted by Rotem Hemo\n\nimport pyaudio\nimport struct\nimport math\n\nfrom audio_recorder import Record\n\n\nclass Segmenter:\n\tdef __init__(self, block_size_in_sec=0.02, rate=44100, channels=1, audio_format=pyaudio.paInt16, tap_threshold=0.010):\n\t\tself.recoder = Record()\n\t\tself.audio_stream = self.recoder.get_stream()\n\n\t\tself.block_size_in_sec = block_size_in_sec\n\t\tself.channels = channels\n\t\tself.rate = rate\n\t\tself.frames_per_block = int(self.rate * self.block_size_in_sec)\n\t\tself.audio_foramt = audio_format\n\n\t\tself.tap_threshold = tap_threshold\n\n\t\tself.max_tap_block = 0.15 / self.block_size_in_sec\n\t\tself.noisy_count = self.max_tap_block + 1\n\t\tself.quiet_count = 0\n\n\t\tself.over_sensitive = 15.0 / self.block_size_in_sec\n\t\tself.under_sensitive = 120.0 / self.block_size_in_sec\n\n\tdef close_strem(self):\n\t\tself.audio_stream.close()\n\n\t@staticmethod\n\tdef rms(data_block):\n\t\tsamples_count = len(data_block) / 2\n\t\tshort_normalize = (1.0 / 32768.0)\n\t\tstring_format = \"%dh\" % samples_count\n\t\tshorts = struct.unpack(string_format, data_block)\n\n\t\tsum_squares = 0.0\n\t\tfor sample in shorts:\n\t\t\tn = sample * short_normalize\n\t\t\tsum_squares += n * n\n\n\t\treturn math.sqrt(sum_squares / samples_count)\n\n\tdef grab_and_detect(self, callback):\n\n\t\t# grab a block of sound\n\t\tblock = self.audio_stream.read(self.frames_per_block)\n\n\t\t# get rms\n\t\tamplitude = self.rms(block)\n\n\t\tif amplitude > self.tap_threshold:\n\t\t\t# noisy block\n\t\t\tself.quiet_count = 0\n\t\t\tself.noisy_count += 1\n\t\t\tif self.noisy_count > self.over_sensitive:\n\t\t\t\t# turn down the sensitivity\n\t\t\t\tself.tap_threshold *= 1.1\n\t\telse:\n\t\t\t# quiet block.\n\n\t\t\tif 1 <= self.noisy_count <= self.max_tap_block:\n\t\t\t\tcallback()\n\n\t\t\tself.noisy_count = 0\n\n\t\t\tself.quiet_count += 1\n\t\t\tif self.quiet_count > self.under_sensitive:\n\t\t\t\t# turn up the sensitivity\n\t\t\t\tself.tap_threshold *= 0.9\n\n\n\n\n\n", "sub_path": "code/project/detection/segmenter.py", "file_name": "segmenter.py", "file_ext": "py", "file_size_in_byte": 1913, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pyaudio.paInt16", "line_number": 12, "usage_type": "attribute"}, {"api_name": "audio_recorder.Record", "line_number": 13, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 39, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "165893718", "text": "# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.12-x86_64/egg/dicom_tools/rescale.py\n# Compiled at: 2018-05-21 04:28:19\n# Size of source mod 2**32: 1918 bytes\nfrom __future__ import print_function\nimport numpy as np\nfrom skimage import exposure\nfrom skimage import img_as_ubyte, img_as_uint\n\ndef rescale16bit(imgIn, verbose=False):\n if imgIn.min() < 0:\n imgIn += abs(imgIn.min())\n imgOut = exposure.rescale_intensity(imgIn, in_range='uint16', out_range='uint16')\n if imgOut.min() < 0:\n print('rescale16bit: WARNING imgOut has negative value')\n imgOut = imgOut.astype(np.uint16)\n out = img_as_uint(imgOut)\n if verbose:\n print('rescale16bit')\n print('type(image) ', type(out))\n print('type(image[0][0]) ', type(out[0][0]))\n return out\n\n\ndef rescale8bit(imgIn, verbose=False):\n if imgIn.min() < 0:\n imgIn += abs(imgIn.min())\n imgOut = exposure.rescale_intensity(imgIn, in_range='uint16', out_range='uint8')\n if imgOut.min() < 0:\n print('rescale8bit: WARNING imgOut has negative value')\n imgOut = imgOut.astype(np.uint8)\n out = img_as_ubyte(imgOut)\n if verbose:\n print('rescale8bit')\n print('type(image) ', type(out))\n print('type(image[0][0]) ', type(out[0][0]))\n return out", "sub_path": "pycfiles/dicom_upload-0.1.2-py2.py3-none-any/rescale.cpython-37.py", "file_name": "rescale.cpython-37.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "skimage.exposure.rescale_intensity", "line_number": 15, "usage_type": "call"}, {"api_name": "skimage.exposure", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 18, "usage_type": "attribute"}, {"api_name": "skimage.img_as_uint", "line_number": 19, "usage_type": "call"}, {"api_name": "skimage.exposure.rescale_intensity", "line_number": 30, "usage_type": "call"}, {"api_name": "skimage.exposure", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "189827380", "text": "import os\nimport os.path\nimport jieba\nimport codecs\nimport pickle\n\nclass ProcessDocument:\n\tdef __init__(self,fileRoot,stopwordFile):\n\t\tif os.path.isdir(fileRoot) and os.path.exists(stopwordFile):\n\t\t\tself.fileRoot = fileRoot\n\t\t\tself.stopwordFile = stopwordFile\n\t\telse:\n\t\t\traise Exception(\"fileRoot is not a folder or soopwordFile wrong!\")\n\t\n\tdef __work4ConWordVec(self,dirpath,filename,documents):\n\t\twith codecs.open(os.path.join(dirpath,filename),\"r\",encoding=\"gbk\") as fr:\n\t\t\t[documents.append(line.strip()) for line in fr if not line.isspace() and line.find(\"content>\")==-1]\n\n\n\tdef constructWordVectorAndVocabulary(self):\n\t\tposDocuments=[]\n\t\tnegDocuments=[]\n\t\t[[self.__work4ConWordVec(dirpath,filename,posDocuments) if dirpath.endswith(\"pos\") else self.__work4ConWordVec(dirpath,filename,negDocuments) if dirpath.endswith(\"neg\") else none for filename in filenames]for dirpath,dirnames,filenames in os.walk(self.fileRoot)]\n\t\tself.documents = posDocuments + negDocuments\n\n\n\t\twith codecs.open(self.stopwordFile,\"r\",\"utf-8\") as fr:\n\t\t\tself.stopwords = [line.strip() for line in fr if not line.isspace()]\n\n\t\t\n\t\tposwords=[(list(jieba.cut(document))) for document in posDocuments]\n\t\tnegwords=[(list(jieba.cut(document))) for document in negDocuments]\n\t\tself.words = poswords + negwords\n\n\n\n\t\twordstemp=[]\n\t\t[[wordstemp.append(word) for word in posword]for posword in poswords]\n\t\t[[wordstemp.append(word) for word in negword]for negword in negwords]\n\t\tself.vocabulary=list(set(wordstemp))\n\n\t\tself.boundary = (len(posDocuments),len(self.documents)) \n\n\n\tdef pickleSomething(self,pickleFileRoot):\n\t\tif not hasattr(self,'vocabulary'):\n\t\t\tself.constructWordVectorAndVocabulary()\n\t\twith open(pickleFileRoot,\"wb\") as fwb:\n\t\t\tpickle.dump(self,fwb)\n\n\n\ndef test1():\n\tobj = ProcessDocument(\"./corpus/all1\",\"./corpus/ChnStopList.txt\")\n\tobj.constructWordVectorAndVocabulary()\n\tprint(\"len(obj.documents): \",len(obj.documents))\n\tprint(\"obj.doucments[0]: \",obj.documents[0])\n\tprint(\"len(obj.words): \",len(obj.words))\n\tprint(\"obj.words[0] length: \",len(obj.words[0]),\" obj.words[0]: \",obj.words[0])\n\tprint(\"obj.words[1] length: \",len(obj.words[1]),\" obj.words[1]: \",obj.words[1])\n\tprint(\"self.boundary: \",obj.boundary)\n\tprint(\"len(obj.vocabulary): \",len(obj.vocabulary))\n\tprint(\"pos before neg and obj.documents[0]=obj.posDocuments[0]: \",obj.documents[0])\n\tprint(\"neg after pos and obj.doucment[boundary]=obj.negDocuments[0]: \",obj.documents[obj.boundary[0]])\n\tprint(\"obj.vocabulary: \",obj.vocabulary)\n\tprint(\"obj.vocabulary[非常]: \",obj.vocabulary.count(\"非常\"))\n\ttemp=[]\n\t[[temp.append(1) if word==\"非常\" else None for word in wordstemp] for wordstemp in obj.words]\n\tprint(\"非常 in all documents nums : \",len(temp))\n\n\n\t# print(\"len(obj.posDocuments)\",len(obj.posDocuments),\" len(obj.negDocuments)\",len(obj.negDocuments))\n\t# print(\"posDocuments[0]: \",obj.posDocuments[0])\n\t# print(\"negDocuments[0]: \",obj.negDocuments[0])\n\t# print(\"len(obj.poswords): \",len(obj.poswords),\" len(obj.poswords): \",len(obj.negwords))\n\t# print(\"poswords[0]: \",obj.poswords[0])\n\t# print(\"negwords[0]: \",obj.negwords[0])\n\t# print(\"len(vocabulary): \",len(obj.vocabulary))\n\t# print(obj.vocabulary)\n\ndef test():\n\tobj = 
ProcessDocument(\"./corpus/train\",\"./corpus/ChnStopList.txt\")\n\tobj.constructWordVectorAndVocabulary()\n\tobj.pickleSomething(\"./process.out\")\n\nif __name__=='__main__':\n\ttest()", "sub_path": "lda_python/sentimentProcess.py", "file_name": "sentimentProcess.py", "file_ext": "py", "file_size_in_byte": 3365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.isdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 23, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 27, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 31, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "357372022", "text": "from PyQt4 import QtGui, QtCore\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass WinMain(QtGui.QMainWindow):\n\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n\n self.resize(1500, 900)\n self.setWindowTitle(\"Reserver v0.1\")\n\n self.__MDI_area = QtGui.QMdiArea()\n self.setCentralWidget(self.__MDI_area)\n\n self.__setupMenubar()\n\n self.__databasesRoot = \"/Users/matthewjbelcher/Documents/Python/db\"\n self.__openMDI_subWindows = {}\n\n self.__projectStartDate = None\n self.__projectEndDate_o = None\n self.__projectEndDate_d = None\n self.__project_oLength = None\n self.__project_dLength = None\n\n self.show()\n\n def __setupMenubar(self):\n\n self.__menubar = self.menuBar()\n self.__menus = {}\n\n # File menu\n\n self.__menus[\"file\"] = self.__menubar.addMenu(\"&File\")\n\n # View\n\n self.__menus[\"view\"] = self.__menubar.addMenu(\"&View\")\n\n qAction_showProjectExplorer = QAction_ShowProjectExplorer(\"&Show project explorer\", self)\n self.__menus[\"view\"].addAction(qAction_showProjectExplorer)\n\n qAction_showImportExplorer = QAction_ShowImportExplorer(\"&Import data\", self)\n self.__menus[\"view\"].addAction(qAction_showImportExplorer)\n\n qAction_showReservingClassExplorer = QAction_ShowReservingClassExplorer(\"&Show reserving class types\", self)\n self.__menus['view'].addAction(qAction_showReservingClassExplorer)\n\n def classifyNewSubWindow(self, name, subWindow, type_):\n self.__openMDI_subWindows[name] = {\"subWindow\": subWindow,\n \"type\": type_}\n\n def declassifySubWindow(self, name):\n del self.__openMDI_subWindows[name]\n\n def addRClassTypeMenu(self, window):\n self.__menus['rClassTypes'] = self.__menubar.addMenu('&Reserving class types')\n\n qAction_addRClassType = QAction_AddRClassType('&Add type', self, window)\n self.__menus['rClassTypes'].addAction(qAction_addRClassType)\n\n qAction_removeRClassType = QAction_RemoveRClassType('&Remove selected type', self, window)\n self.__menus['rClassTypes'].addAction(qAction_removeRClassType)\n\n def removeRClassTypeMenu(self):\n self.__menubar.removeAction(self.__menus['rClassTypes'].menuAction())\n\n @property\n def openMDI_subWindows(self):\n return self.__openMDI_subWindows\n\n @property\n def MDI_area(self):\n return self.__MDI_area\n\n @property\n def databasesRoot(self):\n return self.__databasesRoot\n\n @property\n def projectStartDate(self):\n return self.__projectStartDate\n\n @projectStartDate.setter\n def projectStartDate(self, value):\n if isinstance(value, str):\n self.__projectStartDate = datetime.datetime.strptime(value, \"%Y-%m-%d\").date()\n else:\n self.__projectStartDate = value\n\n @property\n def projectEndDate_o(self):\n return self.__projectEndDate_o\n\n @projectEndDate_o.setter\n def projectEndDate_o(self, value):\n if isinstance(value, str):\n self.__projectEndDate_o = datetime.datetime.strptime(value, \"%Y-%m-%d\").date()\n else:\n self.__projectEndDate_o = value\n\n @property\n def projectEndDate_d(self):\n return self.__projectEndDate_d\n\n @projectEndDate_d.setter\n def projectEndDate_d(self, value):\n if isinstance(value, str):\n self.__projectEndDate_d = datetime.datetime.strptime(value, \"%Y-%m-%d\").date()\n else:\n self.__projectEndDate_d = value\n\n @property\n def project_oLength(self):\n return self.__project_oLength\n\n @project_oLength.setter\n def project_oLength(self, value):\n self.__project_oLength = value\n\n @property\n def project_dLength(self):\n return self.__project_dLength\n\n 
@project_dLength.setter\n def project_dLength(self, value):\n self.__project_dLength = value\n\n def oCount(self, oLength=None):\n if oLength is None:\n oLength = self.__project_oLength\n\n monthCount = (self.__projectEndDate_o.year - self.__projectStartDate.year) * 12 + \\\n (self.__projectEndDate_o.month - self.__projectStartDate.month) + 1\n oCount = (monthCount - 1) // oLength + 1\n\n return oCount\n\n def dCount(self, dLength=None):\n if dLength is None:\n dLength = self.__project_dLength\n\n monthCount = (self.__projectEndDate_d.year - self.__projectStartDate.year) * 12 + \\\n (self.__projectEndDate_d.month - self.__projectStartDate.month) + 1\n dCount = (monthCount - 1) // dLength + 1\n\n return dCount\n\n def oHeaders(self, oLength=0):\n if oLength == 0:\n oLength = self.__project_oLength\n\n oCount = self.oCount(oLength)\n\n oPeriods = [self.__projectStartDate + relativedelta(months=o * oLength) for o in range(oCount)]\n\n if oLength == 1:\n labels = [oPeriod.strftime(\"%b%y\") for oPeriod in oPeriods]\n elif oLength == 3:\n labels = [str(oPeriod.year) + \" Q\" + str((oPeriod.month - 1) // 3 + 1) for oPeriod in oPeriods]\n elif oLength == 6:\n labels = [str(oPeriod.year) + \" H\" + str((oPeriod.month - 1) // 6 + 1) for oPeriod in oPeriods]\n elif oLength == 12:\n labels = [oPeriod.year for oPeriod in oPeriods]\n else:\n oPeriods.append(self.__projectStartDate + relativedelta(months=oCount * oLength))\n labels = [oPeriods[o].strftime(\"%b%y\") + \"-\" + (oPeriods[o + 1] - relativedelta(days=1)).strftime(\"%b%y\")\n for o in range(oCount)]\n\n return labels\n\n def dHeaders(self, dLength=0, basis=\"Development\"):\n if dLength == 0:\n dLength = self.__project_dLength\n\n dCount = self.dCount(dLength)\n project_dCount = self.dCount()\n\n labels = []\n\n if basis == \"Development\":\n labels = [project_dCount - d * dLength for d in range(dCount)]\n labels.reverse()\n\n elif basis == \"Calendar\":\n labels = [(self.__projectEndDate_d - relativedelta(months=d)).strftime(\"%b%y\") for d in range(dCount)]\n labels.reverse()\n\n return labels\n\n\nclass QAction_ShowProjectExplorer(QtGui.QAction):\n\n def __init__(self, caption, parent):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n\n self.triggered.connect(self.__showProjectExplorer)\n\n def __showProjectExplorer(self):\n projectExplorer = self.__parent.openMDI_subWindows[\"Project Explorer\"]\n self.__parent.MDI_area.setActiveSubWindow(projectExplorer[\"subWindow\"].MDI_subWindow)\n\n projectExplorer[\"subWindow\"].MDI_subWindow.setWindowState(QtCore.Qt.WindowMaximized)\n\n\nclass QAction_ShowImportExplorer(QtGui.QAction):\n\n def __init__(self, caption, parent):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n\n self.triggered.connect(self.__showImportExplorer)\n\n def __showImportExplorer(self):\n try:\n self.__parent.openMDI_subWindows[\"Import Explorer\"]\n except KeyError:\n import mImporter\n projectPath = self.__parent.openMDI_subWindows[\"Project Explorer\"][\"subWindow\"].projectPath\n mImporter.SubWinImportExplorer(projectPath=projectPath, parent=self.parent())\n\n\nclass QAction_ShowReservingClassExplorer(QtGui.QAction):\n\n def __init__(self, caption, parent):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n\n self.triggered.connect(self.__showReservingClassExplorer)\n\n def __showReservingClassExplorer(self):\n try:\n self.__parent.openMDI_subWindows['Reserving class types']\n except KeyError:\n import mReservingClasses\n projectPath = 
self.__parent.openMDI_subWindows['Project Explorer']['subWindow'].projectPath\n mReservingClasses.SubWinReservingClassExplorer(projectPath=projectPath, parent=self.parent())\n\n\nclass QAction_AddRClassType(QtGui.QAction):\n\n def __init__(self, caption, parent, window):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n self.__window = window\n\n self.triggered.connect(self.__addRClassType)\n\n def __addRClassType(self):\n import mReservingClasses\n mReservingClasses.WinAddOrEditReservingClassType(parent=self.__window)\n\n\nclass QAction_RemoveRClassType(QtGui.QAction):\n\n def __init__(self, caption, parent, window):\n QtGui.QAction.__init__(self, caption, parent)\n\n self.__parent = parent\n self.__window = window\n\n self.triggered.connect(self.__removeRClassType)\n\n def __removeRClassType(self):\n self.__window.removeRClassType()\n\n\nclass MDI_SubWindow(QtGui.QWidget):\n \"\"\" Base class for MDI sub-windows \"\"\"\n\n def __init__(self, MDI_area):\n QtGui.QWidget.__init__(self)\n\n self.__MDI_area = MDI_area\n self.__mainWindow = self.__MDI_area.parent()\n\n self.__setupWindow()\n\n def __setupWindow(self):\n\n self.__MDI_subWindow = self.__MDI_area.addSubWindow(self)\n self.__MDI_subWindow.resize(600, 600)\n\n self.__MDI_subWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n\n self.__grid = QtGui.QGridLayout()\n self.__grid.setSpacing(0)\n self.__grid.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.__grid)\n\n @property\n def MDI_area(self):\n return self.__MDI_area\n\n @property\n def mainWindow(self):\n return self.__mainWindow\n\n @property\n def MDI_subWindow(self):\n return self.__MDI_subWindow\n\n @property\n def grid(self):\n return self.__grid\n", "sub_path": "mWindows.py", "file_name": "mWindows.py", "file_ext": "py", "file_size_in_byte": 9774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PyQt4.QtGui.QMainWindow", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMainWindow.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QMainWindow", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QMdiArea", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 90, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 158, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 169, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 170, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 189, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 195, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 195, "usage_type": "name"}, {"api_name": 
"PyQt4.QtGui.QAction.__init__", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 198, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 198, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 208, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 208, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 211, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 211, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 214, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 214, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 214, "usage_type": "name"}, {"api_name": "mImporter.SubWinImportExplorer", "line_number": 226, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 229, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 229, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 232, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 232, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 232, "usage_type": "name"}, {"api_name": "mReservingClasses.SubWinReservingClassExplorer", "line_number": 244, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 247, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 247, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 250, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 250, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 250, "usage_type": "name"}, {"api_name": "mReservingClasses.WinAddOrEditReservingClassType", "line_number": 259, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 262, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 262, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QAction.__init__", "line_number": 265, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QAction", "line_number": 265, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 265, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 276, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 276, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QWidget.__init__", "line_number": 280, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QWidget", "line_number": 280, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 280, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 292, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 292, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 294, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 294, "usage_type": "name"}]}
+{"seq_id": "275545559", "text": "import matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.artist import setp\nimport pandas.core.common as com\nfrom pandas.compat import range, lrange, lmap, map, zip\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom statsmodels.nonparametric.smoothers_lowess import lowess\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nimport pandas as pd \nimport numpy as np \n\n\"\"\"\nThis module provides helper methods to carry out data distribution\nanalysis on flight data found on https://www.kaggle.com/usdot/flight-delays.\n\nThese methods are specific to the flight dataset and is not meant to be \ngeneric functions for other datasets.\n\"\"\"\n\ndef scatter_matrix_all(frame, alpha=0.5, figsize=None, grid=False, diagonal='hist', marker='.', density_kwds=None, hist_kwds=None, range_padding=0.05, **kwds):\n \n df = frame\n num_cols = frame._get_numeric_data().columns.values\n n = df.columns.size\n fig, axes = plt.subplots(nrows=n, ncols=n, figsize=figsize, squeeze=False)\n\n # no gaps between subplots\n fig.subplots_adjust(wspace=0, hspace=0)\n\n mask = com.notnull(df)\n marker = _get_marker_compat(marker)\n\n hist_kwds = hist_kwds or {}\n density_kwds = density_kwds or {}\n\n # workaround because `c='b'` is hardcoded in matplotlibs scatter method\n kwds.setdefault('c', plt.rcParams['patch.facecolor'])\n\n boundaries_list = []\n for a in df.columns:\n if a in num_cols:\n values = df[a].values[mask[a].values]\n else:\n values = df[a].value_counts()\n rmin_, rmax_ = np.min(values), np.max(values)\n rdelta_ext = (rmax_ - rmin_) * range_padding / 2.\n boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))\n\n for i, a in zip(lrange(n), df.columns):\n for j, b in zip(lrange(n), df.columns):\n ax = axes[i, j]\n\n if i == j:\n if a in num_cols: # numerical variable\n values = df[a].values[mask[a].values]\n # Deal with the diagonal by drawing a histogram there.\n if diagonal == 'hist':\n ax.hist(values, **hist_kwds)\n elif diagonal in ('kde', 'density'):\n from scipy.stats import gaussian_kde\n y = values\n gkde = gaussian_kde(y)\n ind = np.linspace(y.min(), y.max(), 1000)\n ax.plot(ind, gkde.evaluate(ind), **density_kwds)\n ax.set_xlim(boundaries_list[i])\n else: # categorical variable\n values = df[a].value_counts()\n ax.bar(list(range(df[a].nunique())), values)\n else:\n common = (mask[a] & mask[b]).values\n # two numerical variables\n if a in num_cols and b in num_cols:\n if i > j:\n ax.scatter(df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds)\n # The following 2 lines add the lowess smoothing\n ys = lowess(df[a][common], df[b][common])\n ax.plot(ys[:,0], ys[:,1], 'red')\n else:\n pearR = df[[a, b]].corr()\n ax.text(df[b].min(), df[a].min(), 'r = %.4f' % (pearR.iloc[0][1]))\n ax.set_xlim(boundaries_list[j])\n ax.set_ylim(boundaries_list[i])\n # two categorical variables\n elif a not in num_cols and b not in num_cols:\n if i > j:\n from statsmodels.graphics import mosaicplot\n mosaicplot.mosaic(df, [b, a], ax, labelizer=lambda k:'')\n # one numerical variable and one categorical variable\n else:\n if i > j:\n tol = pd.DataFrame(df[[a, b]])\n if a in num_cols:\n label = [ k for k, v in tol.groupby(b) ]\n values = [ v[a].tolist() for k, v in tol.groupby(b) ]\n ax.boxplot(values, labels=label)\n else:\n label = [ k for k, v in tol.groupby(a) ]\n values = [ v[b].tolist() for k, v in tol.groupby(a) ]\n ax.boxplot(values, labels=label, vert=False)\n\n ax.set_xlabel('')\n 
ax.set_ylabel('')\n\n _label_axis(ax, kind='x', label=b, position='bottom', rotate=True)\n _label_axis(ax, kind='y', label=a, position='left')\n\n if j!= 0:\n ax.yaxis.set_visible(False)\n if i != n-1:\n ax.xaxis.set_visible(False)\n\n for ax in axes.flat:\n setp(ax.get_xticklabels(), fontsize=8)\n setp(ax.get_yticklabels(), fontsize=8)\n return fig\n \n\ndef _label_axis(ax, kind='x', label='', position='top', ticks=True, rotate=False):\n from matplotlib.artist import setp\n if kind == 'x':\n ax.set_xlabel(label, visible=True)\n ax.xaxis.set_visible(True)\n ax.xaxis.set_ticks_position(position)\n ax.xaxis.set_label_position(position)\n if rotate:\n setp(ax.get_xticklabels(), rotation=90)\n elif kind == 'y':\n ax.yaxis.set_visible(True)\n ax.set_ylabel(label, visible=True)\n #ax.set_ylabel(a)\n ax.yaxis.set_ticks_position(position)\n ax.yaxis.set_label_position(position)\n return\n\ndef _get_marker_compat(marker):\n import matplotlib.lines as mlines\n import matplotlib as mpl\n if mpl.__version__ < '1.1.0' and marker == '.':\n return 'o'\n if marker not in mlines.lineMarkers:\n return 'o'\n return marker\n\ndef plotBarPercentage(data, groupAttr, dependencyAttr, axAttr, condition, filter=0):\n totaldf = data.groupby([groupAttr])[dependencyAttr].count()\n denomdf = data.loc[condition]\n denomdf = denomdf.groupby([groupAttr])[dependencyAttr].count()\n df = denomdf/totaldf*100\n df = df[df > filter]\n if len(df) > 0:\n ax = df.plot.bar(figsize=(14, 6), ax = axAttr)\n ax.set_title(dependencyAttr)\n ax.set_ylabel('Percentage')\n\ndef plotBar(data, groupAttr, dependencyAttr, axAttr, condition):\n df = data.loc[condition]\n df = df.groupby([groupAttr])[dependencyAttr].count()\n ax = df.plot.bar(figsize=(14, 6), ax = axAttr)\n ax.set_ylabel(dependencyAttr)\n\ndef plotBars(data, groupAttr, dependencyAttrs, rows, cols, conditions):\n fig, axes = plt.subplots(nrows=rows, ncols=cols)\n r = 0\n c = 0\n for i in range(len(dependencyAttrs)):\n plotBar(data, groupAttr, dependencyAttrs[i], axes[r,c], conditions[i])\n if c == cols-1:\n c = -1\n r = r + 1\n c = c + 1\n \ndef plotBarsPercentage(data, groupAttr, dependencyAttrs, rows, cols, conditions, filter = 0):\n fig, axes = plt.subplots(nrows=rows, ncols=cols)\n r = 0\n c = 0\n for i in range(len(dependencyAttrs)):\n if rows > 1:\n plotBarPercentage(data, groupAttr, dependencyAttrs[i], axes[r,c], conditions[i], filter)\n else:\n plotBarPercentage(data, groupAttr, dependencyAttrs[i], axes[c], conditions[i], filter)\n\n if c == cols-1:\n c = -1\n r = r + 1\n c = c + 1\n\ndef plotMapData(df, longAttr, latAttr, valAttr, figw=8, figh=8, initmarksize= 0.5):\n # setup Lambert Conformal basemap.\n plt.figure(figsize=(figw,figh))\n m = Basemap(width=12000000,height=9000000,projection='lcc',\n resolution='c',lat_1=45.,lat_2=55,lat_0=50,lon_0=-107.)\n # draw a boundary around the map, fill the background.\n # this background will end up being the ocean color, since\n # the continents will be drawn on top.\n m.drawmapboundary(fill_color='aqua')\n # fill continents, set lake color same as ocean color.\n m.fillcontinents(color='coral',lake_color='aqua')\n # draw parallels and meridians.\n # label parallels on right and top\n # meridians on bottom and left\n parallels = np.arange(0.,81,10.)\n # labels = [left,right,top,bottom]\n m.drawparallels(parallels,labels=[False,True,True,False])\n meridians = np.arange(10.,351.,20.)\n m.drawmeridians(meridians,labels=[True,False,False,True])\n # plot blue dot on Boulder, colorado and label it as such.\n\n for lon, lat, mag in 
zip(df[longAttr].values, df[latAttr].values, df[valAttr].values):\n xpt,ypt = m(lon, lat)\n lonpt, latpt = m(xpt,ypt,inverse=True)\n msize = mag * initmarksize\n #map.plot(x, y, marker_string, markersize=msize)\n m.plot(xpt,ypt,'bo', markersize=msize) # plot a blue dot there \n\n plt.show()\n\ndef plotJointPlotSplice0_10_240_By(x, y, delayAttr, data):\n # Create dataset based on splice conditions\n flights_greater_than_0_and_less_than_10 = data.loc[\n (data[delayAttr] > 0)\n & (data[delayAttr] <= 10)\n ]\n flights_greater_than_10_and_less_than_240 = data.loc[\n (data[delayAttr] > 10)\n & (data[delayAttr] <= 240)\n ]\n\n flights_greater_than_240 = data.loc[\n (data[delayAttr] > 240)\n ]\n sns.jointplot(x=x, y=y, kind=\"kde\", data=flights_greater_than_0_and_less_than_10, size=4)\n sns.jointplot(x=x, y=y, kind=\"kde\", data=flights_greater_than_10_and_less_than_240, size=4)\n sns.jointplot(x=x, y=y, kind=\"kde\", data=flights_greater_than_240, size=4)\n\ndef plotJointPlot(x, y, delayAttr, data, title):\n df = data\n datasetSize = len(df)\n g = sns.jointplot(x=x, y=y, kind=\"kde\", data=df, size=4)\n txt = plt.title(title + \",\\n Dataset Size = \" + str(datasetSize), fontsize = 24, y = 0.5, x = 6)\n \ndef plotJointPlotSplice(x, y, delayAttr, data, cond, title):\n df = data.loc[cond]\n datasetSize = len(df)\n g = sns.jointplot(x=x, y=y, kind=\"kde\", data=df, size=4)\n txt = plt.title(title + \",\\n Dataset Size = \" + str(datasetSize), fontsize = 24, y = 0.5, x = 6)\n \ndef generateDistributionDF(data, timeAttr, monthAttr, delayAttr, aggfunc= np.sum):\n pivot = pd.pivot_table(data,index=[monthAttr, timeAttr],values=[delayAttr],aggfunc=aggfunc)\n pivot.reset_index(level=0, inplace=True)\n pivot.reset_index(level=0, inplace=True)\n return pivot\n\ndef plot3D(data, x, y, z):\n distdf = generateDistributionDF(data, y, x, z)\n distdf_avg = generateDistributionDF(data, y, x, z, np.mean) \n\n fig = plt.figure(figsize=(16, 6), dpi=80)\n\n #---- First subplot\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n\n surf = ax.plot_trisurf(distdf[x], distdf[y], distdf[z], cmap=plt.cm.jet, linewidth=0.03)\n fig.colorbar(surf)\n\n #---- Second subplot\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n surf = ax.plot_trisurf(distdf_avg[x], distdf_avg[y], distdf_avg[z], cmap=plt.cm.jet, linewidth=0.03)\n fig.colorbar(surf)\n\n plt.show() \n", "sub_path": "airport/chartlib.py", "file_name": "chartlib.py", "file_ext": "py", "file_size_in_byte": 10897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "seaborn.set_style", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "pandas.core.common.notnull", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.core.common", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.compat.zip", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.compat.lrange", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.compat.zip", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.compat.lrange", 
"line_number": 52, "usage_type": "call"}, {"api_name": "scipy.stats.gaussian_kde", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.compat.range", "line_number": 70, "usage_type": "call"}, {"api_name": "statsmodels.nonparametric.smoothers_lowess.lowess", "line_number": 78, "usage_type": "call"}, {"api_name": "statsmodels.graphics.mosaicplot.mosaic", "line_number": 89, "usage_type": "call"}, {"api_name": "statsmodels.graphics.mosaicplot", "line_number": 89, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.artist.setp", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.artist.setp", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.artist.setp", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.__version__", "line_number": 140, "usage_type": "attribute"}, {"api_name": "matplotlib.lines.lineMarkers", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.lines", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "pandas.compat.range", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "pandas.compat.range", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.compat.zip", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "seaborn.jointplot", "line_number": 233, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 234, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 235, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "seaborn.jointplot", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pandas.pivot_table", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 257, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 264, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", 
"line_number": 269, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}]}
+{"seq_id": "645483251", "text": "from typing import List\n\n\"\"\"\n\nGiven the array queries of positive integers between 1 and m,\nyou have to process all queries[i] (from i=0 to i=queries.length-1)\naccording to the following rules:\n\nIn the beginning, you have the permutation P=[1,2,3,...,m].\nFor the current i, find the position of queries[i] in the permutation P\n(indexing from 0) and then move this at the beginning of the\npermutation P. Notice that the position of queries[i] in P\nis the result for queries[i].\nReturn an array containing the result for the given queries.\n\nExample :\nInput: queries = [3,1,2,1], m = 5\nOutput: [2,1,2,1]\nExplanation: The queries are processed as follow:\n\nFor i=0: queries[i]=3, P=[1,2,3,4,5], position of 3 in P is 2,\nthen we move 3 to the beginning of P resulting in P=[3,1,2,4,5].\n\nFor i=1: queries[i]=1, P=[3,1,2,4,5], position of 1 in P is 1,\nthen we move 1 to the beginning of P resulting in P=[1,3,2,4,5].\n\nFor i=2: queries[i]=2, P=[1,3,2,4,5], position of 2 in P is 2,\nthen we move 2 to the beginning of P resulting in P=[2,1,3,4,5].\n\nFor i=3: queries[i]=1, P=[2,1,3,4,5], position of 1 in P is 1,\nthen we move 1 to the beginning of P resulting in P=[1,2,3,4,5].\nTherefore, the array containing the result is [2,1,2,1].\n\nLink : https://leetcode.com/problems/queries-on-a-permutation-with-key/\n\n\"\"\"\n\n\n'''\nNaive approach\nThere are three things you need to know:\n1. Find index by element\n2. Delete by index .pop(index_position)\n3. Insert by index .insert(position,element)\n\nbut list is the incorrect DS to use in python because it is not optimized for\nmultiple insertions and deletions.\nLists are not optimized for modifications at the front,\nand somelist.insert(0, something) is an O(n) operation.\nAll of the above mentioned approaches are O(n)\n\npython tip :\nCheck out itertools count instead of using List.index\nsince list.index returns first occurance only\n'''\n\n\nclass solution():\n def process_queries(self, queries: List[int], m: int) -> List[int]:\n permutation = [element for element in range(1, m+1)]\n sol = []\n for index in range(0, len(queries)):\n value = queries[index]\n ele = permutation.index(value)\n sol.append(ele)\n permutation.pop(ele)\n permutation.insert(0, value)\n return sol\n\n\nobj = solution()\n# print(obj.process_queries([3, 1, 2, 1], 5))\n# print(obj.process_queries([4, 1, 2, 2], 4))\nprint(obj.process_queries([7, 5, 5, 8, 3], 8))\n\n\n'''\n\nhttps://cp-algorithms.com/data_structures/fenwick.html\n\n'''\n", "sub_path": "1409_queries_on_a_permutation_in_key.py", "file_name": "1409_queries_on_a_permutation_in_key.py", "file_ext": "py", "file_size_in_byte": 2520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 59, "usage_type": "name"}]}
+{"seq_id": "469749610", "text": "from dagster.core.instance import DagsterInstance\nfrom dagster.core.storage.event_log.sql_event_log import SqlEventLogStorage\n\n\ndef migrate_event_log_data(instance=None):\n '''\n Utility method to migrate the data in the existing event log records. Reads every event log row\n reachable from the instance and reserializes it to event log storage. Deserializing and then\n reserializing the event from storage allows for things like SQL column extraction, filling\n explicit default values, etc.\n '''\n if not instance:\n instance = DagsterInstance.get()\n\n event_log_storage = instance._event_storage # pylint: disable=protected-access\n if not isinstance(event_log_storage, SqlEventLogStorage):\n return\n\n for run in instance.get_runs():\n event_records_by_id = event_log_storage.get_logs_for_run_by_log_id(run.run_id)\n for record_id, event in event_records_by_id.items():\n event_log_storage.update_event_log_record(record_id, event)\n", "sub_path": "python_modules/dagster/dagster/core/storage/event_log/migration.py", "file_name": "migration.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dagster.core.instance.DagsterInstance.get", "line_number": 13, "usage_type": "call"}, {"api_name": "dagster.core.instance.DagsterInstance", "line_number": 13, "usage_type": "name"}, {"api_name": "dagster.core.storage.event_log.sql_event_log.SqlEventLogStorage", "line_number": 16, "usage_type": "argument"}]}
+{"seq_id": "379040465", "text": "import json\nimport time\nfrom requests import get, post\nfrom config import FORM_RECOGNIZER_CONGIF\nfrom utilities.utilities import remove_file, decrypt_pdf\n\napim_key = FORM_RECOGNIZER_CONGIF['API_KEY']\npost_url = FORM_RECOGNIZER_CONGIF['ANALYZE_LAYOUT_ENDPOINT']\n\n\ndef init(filename, file_password, contentType='pdf', resultType=\"text\"):\n \"\"\"\n Initialize local form layout recognition process\n \"\"\"\n textResult = ''\n error_message = None\n if (contentType == 'pdf'):\n error, pdfFilename = decrypt_pdf(filename, password=file_password)\n filename = pdfFilename\n error_message = error\n\n if(not error_message):\n x = 'application' if contentType == 'pdf' else 'image'\n headers = {\n # Request headers\n 'Content-Type': f'{x}/{contentType}',\n 'Ocp-Apim-Subscription-Key': apim_key,\n }\n\n with open(filename, \"rb\") as f:\n data_bytes = f.read()\n\n try:\n resp = post(url=post_url, data=data_bytes, headers=headers)\n if resp.status_code != 202:\n textResult = f\"POST analyze failed:\\n{resp.text}\"\n\n get_url = resp.headers[\"operation-location\"]\n textResult = get_layout_results(get_url, resultType)\n remove_file(filename)\n\n except Exception as e:\n textResult = f\"POST analyze failed:\\n{str(e)}\"\n else:\n remove_file(filename)\n textResult = error_message\n\n return textResult\n\n\ndef parse_text(json_result):\n \"\"\"\n Parse final result from json response\n \"\"\"\n textResult = ''\n for result in json_result['analyzeResult']['readResults']:\n # textResult += f\"***Page No. {result['page']}***\\n\"\n for line in result['lines']:\n textResult += line['text']\n textResult += '\\n'\n textResult += '\\n'\n\n return textResult\n\n\ndef get_layout_results(get_url, resultType=\"text\"):\n \"\"\"\n Fetch requested form's layout results by using authorized token\n \"\"\"\n textResult = ''\n n_tries = 10\n n_try = 0\n wait_sec = 5\n stopProcess = False\n while (n_try < n_tries and not(stopProcess)):\n try:\n resp = get(url=get_url, headers={\n \"Ocp-Apim-Subscription-Key\": apim_key})\n resp_json = json.loads(resp.text)\n if resp.status_code != 200:\n textResult = f\"GET Layout results failed:\\n{resp_json}\"\n stopProcess = True\n\n status = resp_json[\"status\"]\n if status == \"succeeded\":\n if (resultType == \"text\"):\n textResult = parse_text(resp_json)\n elif (resultType == \"json\"):\n textResult = str(resp_json)\n stopProcess = True\n\n if status == \"failed\":\n textResult = f\"Layout Analysis failed:\\n{resp_json}\"\n stopProcess = True\n\n # Analysis still running. 
Wait and retry.\n time.sleep(wait_sec)\n n_try += 1\n\n except Exception as e:\n textResult = f\"GET analyze results failed:\\n{str(e)}\"\n stopProcess = True\n return textResult\n", "sub_path": "CognitiveAPI/process_forms/extract_local.py", "file_name": "extract_local.py", "file_ext": "py", "file_size_in_byte": 3168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "config.FORM_RECOGNIZER_CONGIF", "line_number": 7, "usage_type": "name"}, {"api_name": "config.FORM_RECOGNIZER_CONGIF", "line_number": 8, "usage_type": "name"}, {"api_name": "utilities.utilities.decrypt_pdf", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "utilities.utilities.remove_file", "line_number": 40, "usage_type": "call"}, {"api_name": "utilities.utilities.remove_file", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 77, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 79, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "424275303", "text": "#! /usr/bin/env python3\n#encoding: utf-8\n\nfrom datetime import datetime,timedelta\nfrom iotools import *\nfrom mytools import *\nfrom crawler import *\nfrom client import *\n\ndef search():\n\tprint(KWD)\n\tclient = TwitterClient()\n\tfw = JsonStorer('tw_search_%s' % KWD)\n\tdt,EndTm,inc = datetime(2014,11,30),datetime(2014,12,2),timedelta(1)\n\twhile dt < EndTm:\n\t\tst = dt.strftime('%Y-%m-%d')\n\t\tdt += inc\n\t\tet = dt.strftime('%Y-%m-%d')\n\t\tprint(st, et)\n\t\tquery = TW_QUERY.format((st, et))\n\t\turl = TW_SEARCH_URL.format(query)\n\t\treq =TwitterRequest(url)\n\t\treq.perform()\n\t\ttweets, cursor = client.parse_search(req)\n\t\twhile len(tweets)>0:\n\t\t\tprint(len(tweets), end=', ', flush=True)\n\t\t\tfw.write({'t':st, 'd':tweets})\n\t\t\turl = TW_SEARCH_SCROLL.format(query, cursor)\n\t\t\treq.set_url(url)\n\t\t\treq.perform()\n\t\t\ttweets, cursor = client.parse_search(req, False)\n\t\tprint()\n\tfw.close()\n\nif __name__=='__main__':\n\tsearch()\n\n\n", "sub_path": "nonparall.py", "file_name": "nonparall.py", "file_ext": "py", "file_size_in_byte": 897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "client.parse_search", "line_number": 24, "usage_type": "call"}, {"api_name": "client.parse_search", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "574362305", "text": "import gym\nimport gym_stocks\nimport random\n\nenv = gym.make('Stocks-v0')\n# print(env.reset())\nenv.reset()\n\nfor i in range(10):\n\tprs = (random.randint(0,20)-10)/10\n\tdata,reward,done, _ = env.step(prs)\n\t# print(data)\n\tprint(\"act: {}, roi(reward): {}\".format(prs,reward))\n\tprint(\"---\")\n\t#print env.step(0)\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gym.make", "line_number": 5, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "29789618", "text": "import read_Puzzle\nimport copy\nfrom collections import deque\nimport math\nimport time\nfrom random import shuffle\n\nglobal counter\ncounter = 0\nglobal checkcounter\ncheckcounter = 0\n\n\n\ndef freeFlowDumb(puzzle,rows,columns,left, values,sourceA,sourceB,idx,frontier):\n if not frontier:\n return 0\n global counter\n pos_now = frontier.pop()\n counter += 1\n value = values[idx]\n\n puzzle_temp = copy.deepcopy(puzzle)\n if pos_now == sourceB[idx]:\n if check1(puzzle_temp,idx,values,sourceA[idx],sourceB[idx],rows,columns):\n if left == 1:\n print('done')\n print(counter)\n return puzzle\n pos_now = sourceA[idx+1]\n\n result = freeFlowDumb(puzzle_temp,rows,columns,left-1,values,sourceA,sourceB,idx+1,[pos_now])\n if result != 0 :\n return result\n return 0\n else:\n return 0\n else:\n if puzzle_temp[pos_now[0]][pos_now[1]] == '_':\n puzzle_temp[pos_now[0]][pos_now[1]] = value\n neighbors = getNeighbor(pos_now,rows,columns)\n shuffle(neighbors)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle_temp[neighbor[0]][neighbor[1]] == '_' or neighbor == sourceB[idx]:\n frontier.append(neighbor)\n result = freeFlowDumb(puzzle_temp,rows,columns,left,values,sourceA,sourceB,idx,frontier)\n if result != 0:\n return result\n return 0\n\n\ndef freeFlowSmart(puzzle,rows,columns,left, values,sourceA,sourceB,idx,frontier):\n if not frontier:\n return 0\n pos_now = frontier.pop()\n value = values[idx]\n global counter\n counter += 1\n global checkcounter\n\n puzzle_temp = copy.deepcopy(puzzle)\n if pos_now == sourceB[idx]:\n #print(checkcounter)\n\n if check1(puzzle_temp,idx,values,sourceA[idx],sourceB[idx],rows,columns) :\n if left == 1:\n print('done')\n print(counter)\n return puzzle\n\n pos_now = sourceA[idx+1]\n\n result = freeFlowSmart(puzzle_temp,rows,columns,left-1,values,sourceA,sourceB,idx+1,[pos_now])\n if result != 0 :\n return result\n return 0\n else:\n return 0\n else:\n if puzzle_temp[pos_now[0]][pos_now[1]] == '_' or pos_now == sourceA[idx]:\n puzzle_temp[pos_now[0]][pos_now[1]] = value\n check_move = check2(puzzle_temp,idx,values,sourceA,sourceB,rows,columns)\n if not check_move:\n puzzle_temp[pos_now[0]][pos_now[1]] = '_'\n else:\n neighbors = getNeighbor(pos_now,rows,columns)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle_temp[neighbor[0]][neighbor[1]] == '_' or neighbor == sourceB[idx]:\n frontier.append(neighbor)\n result = freeFlowSmart(puzzle_temp,rows,columns,left,values,sourceA,sourceB,idx,frontier)\n if result != 0:\n return result\n return 0\n\ndef freeFlowEcSmart(puzzle,rows,columns,left, values,sourceA,sourceB,idx,frontier):\n # this function is for extra credit\n if not frontier:\n return 0\n pos_now = frontier.pop()\n #print(puzzle)\n value = values[idx]\n global counter\n counter += 1\n global checkcounter\n #print(puzzle)\n #print(frontier)\n #puzzle_temp = copy.deepcopy(puzzle)\n\n if pos_now == sourceB[idx]:\n print(checkcounter)\n check_move3 = check3(puzzle, rows, columns, value)\n #check_move4 = check4(puzzle, rows, columns, sourceA, sourceB,idx,values)\n if check1(puzzle,idx,values,sourceA[idx],sourceB[idx],rows,columns) and check_move3 :\n if left == 1:\n print('done')\n print(counter)\n return puzzle\n #ordered_values,ordered_A,ordered_B = ordering (idx,values,sourceA,sourceB,puzzle,rows,columns)\n # pos_now = ordered_A[idx+1]\n # result = freeFlowEcSmart(puzzle,rows,columns,left-1,ordered_values,ordered_A,ordered_B,idx+1,[pos_now])\n pos_now = 
sourceA[idx + 1]\n result = freeFlowEcSmart(puzzle, rows, columns, left - 1, values, sourceA, sourceB, idx + 1,[pos_now])\n if result != 0 :\n return result\n return 0\n else:\n return 0\n else:\n if puzzle[pos_now[0]][pos_now[1]] == '_' or pos_now == sourceA[idx]:\n puzzle[pos_now[0]][pos_now[1]] = value\n\n check_move5 = check5(puzzle, idx, values, sourceA[idx], sourceB[idx], rows, columns, pos_now)\n\n #print(check_move3)\n if (not check_move5) :\n puzzle[pos_now[0]][pos_now[1]] = '_'\n elif not (check2(puzzle,idx,values,sourceA,sourceB,rows,columns)):\n puzzle[pos_now[0]][pos_now[1]] = '_'\n\n\n elif not (check4(puzzle, rows, columns, sourceA, sourceB, idx, values, pos_now)):\n puzzle[pos_now[0]][pos_now[1]] = '_'\n else:\n neighbors = getNeighbor_newnew(pos_now,rows,columns,idx,sourceB)\n # neighbors = getNeighbor(pos_now, rows, columns)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle[neighbor[0]][neighbor[1]] == '_' or neighbor == sourceB[idx]:\n frontier.append(neighbor)\n result = freeFlowEcSmart(puzzle,rows,columns,left,values,sourceA,sourceB,idx,frontier)\n if result != 0:\n return result\n if pos_now != sourceA[idx]:\n puzzle[pos_now[0]][pos_now[1]] = '_'\n return 0\n\n\n\n\ndef check1(puzzle,idx,values,sourceA,sourceB,rows,columns): # check basic constraints\n value = values[idx]\n # first check if the sourceA has only one same color source\n neighbors = getNeighbor(sourceA,rows,columns)\n count = 0\n for i in range(len(neighbors)):\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 1:\n return False\n\n # first check if the sourceB has only one same color source\n neighbors = getNeighbor(sourceB,rows,columns)\n count = 0\n for i in range(len(neighbors)):\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 1:\n return False\n\n # check non-source node\n for row in range(rows):\n for column in range(columns):\n if [row,column] != sourceA and [row,column] != sourceB:\n if puzzle[row][column] == value:\n count = 0\n neighbors = getNeighbor([row,column],rows,columns)\n for i in range(len(neighbors)):\n\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 2:\n return False\n return True\n\ndef check2(puzzle,idx,values,sourceA,sourceB,rows,columns):\n # check if there still are paths for other colors\n global checkcounter\n checkcounter += 1\n #print(checkcounter)\n #puzzle_temp = copy.deepcopy(puzzle)\n for i in range(idx+1,len(values)):\n A = sourceA[i]\n B = sourceB[i]\n value = values[i]\n # using BFS to determine whether there is still path between A and B\n frontier = deque([A])\n if not pathAvailable(puzzle,frontier,B,rows,columns,value):\n return False\n\n return True\n\ndef check3(puzzle,rows,columns,value):\n # check if the move creates dead end\n\n for i in range(rows):\n for j in range(columns):\n node_now = puzzle[i][j]\n if node_now == \"_\":\n neighbors = getNeighbor([i,j],rows,columns)\n count = 0\n for k in range(len(neighbors)):\n neighborx = neighbors[k][0]\n neighbory = neighbors[k][1]\n if puzzle[neighborx][neighbory] == value:\n count += 1\n if count >= len(neighbors) - 1:\n #print(i,j)\n #print(puzzle)\n return False\n return True\n\ndef check4(puzzle,rows,columns,sourceA,sourceB,idx,values,pos):\n # check if there is isolated blank area\n modified = []\n sourceA_temp = copy.deepcopy(sourceA)\n sourceA_temp[idx] = pos\n for i in range (rows):\n for j in range(columns):\n node_now = puzzle[i][j]\n # start from a blank\n if 
node_now == \"_\":\n # using BFS to determine whether there is still path to source node\n A = [i,j]\n frontier = deque([A])\n indicator = False\n block_sourceA_neighbor = []\n block_sourceB_neighbor = []\n block_neighbor = []\n while not (not frontier):\n\n A = frontier.popleft()\n #print(A)\n x = A[0]\n y = A[1]\n puzzle[x][y] = 'X'\n modified.append([x, y])\n # print(puzzle)\n neighbors = getNeighbor(A, rows, columns)\n for m in range(len(neighbors)):\n neighbor = neighbors[m]\n\n if neighbor in sourceA_temp[idx:]:\n block_sourceA_neighbor.append(sourceA_temp.index(neighbor))\n\n elif neighbor in sourceB[idx:]:\n block_sourceB_neighbor.append(sourceB.index(neighbor))\n # if neighbor in sourceA[idx:] or neighbor in sourceB[idx:]:\n # indicator = True\n elif puzzle[neighbor[0]][neighbor[1]] == '_':\n frontier.append(neighbor)\n # if not indicator:\n indicator = bool(set(block_sourceA_neighbor) & set(block_sourceB_neighbor))\n #print(indicator)\n #print(puzzle)\n if not indicator:\n #print(puzzle)\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n #print(False)\n return False\n\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n #print(True)\n return True\n\ndef check5 (puzzle,idx,values,sourceA,sourceB,rows,columns,pos):\n # check if the move is valid (each non-source cell can have two neighbors with same color)\n value = values[idx]\n for row in range(rows):\n for column in range(columns):\n if [row,column] != sourceA and [row,column] != sourceB and [row,column] != pos:\n if puzzle[row][column] == value:\n count = 0\n neighbors = getNeighbor([row,column],rows,columns)\n for i in range(len(neighbors)):\n\n if puzzle[neighbors[i][0]][neighbors[i][1]] == value:\n count += 1\n if count != 2:\n return False\n return True\n\ndef check6(puzzle,rows,columns,pos):\n # clever way to consider if the move is valid\n # not used\n X = rows\n Y = columns\n getcircle = lambda x, y: [[x2, y2] for x2 in range(x - 1, x + 2)\n for y2 in range(y - 1, y + 2)\n if (-1 < x <= X and\n -1 < y <= Y and\n (x != x2 or y != y2) and\n (0 <= x2 <= X) and\n (0 <= y2 <= Y))]\n circle = getcircle(pos[0],pos[1])\n print(circle)\n modified = []\n for i in range(len(circle)):\n node_now = puzzle[circle[i][0]][circle[i][1]]\n if node_now == \"_\":\n # using BFS to determine whether there is still path to source node\n A = [circle[i][0],circle[i][1]]\n frontier = deque([A])\n while not (not frontier):\n A = frontier.popleft()\n # print(A)\n x = A[0]\n y = A[1]\n puzzle[x][y] = 'X'\n modified.append([x, y])\n # print(puzzle)\n neighbors = getNeighbor(A, rows, columns)\n for m in range(len(neighbors)):\n neighbor = neighbors[m]\n if neighbor in circle:\n if puzzle[neighbor[0]][neighbor[1]] == '_':\n frontier.append(neighbor)\n # if not indicator:\n if not frontier:\n break\n for i in range(len(circle)):\n node_now = puzzle[circle[i][0]][circle[i][1]]\n if node_now == \"_\":\n\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n # print(False)\n return False\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n # print(False)\n return True\n\ndef pathAvailable(puzzle,frontier,B,rows,columns,value):\n modified = []\n while not (not frontier):\n A = frontier.popleft()\n if A == B :\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n return True\n x = A[0]\n y = A[1]\n if puzzle[x][y] == '_':\n puzzle[x][y] = 
'X'\n modified.append([x,y])\n #print(puzzle)\n neighbors = getNeighbor(A,rows,columns)\n #print(neighbors)\n for i in range(len(neighbors)):\n neighbor = neighbors[i]\n #print(neighbor)\n if puzzle[neighbor[0]][neighbor[1]] == '_' or neighbor == B:\n frontier.append(neighbor)\n\n for k in range(len(modified)):\n x = modified[k][0]\n y = modified[k][1]\n puzzle[x][y] = '_'\n return False\n\ndef getNeighbor(pos,rows,columns):\n neighbors = []\n i_lower = max(0,pos[0] - 1) # up\n i_upper = min(rows-1,pos[0]+1) # down\n j_lower = max(0,pos[1] - 1) # left\n j_upper = min(columns-1,pos[1] + 1) #right\n\n\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n return neighbors\n\ndef getNeighbor_new(pos,rows,columns):\n neighbors = []\n i_lower = max(0,pos[0] - 1)\n i_upper = min(columns-1,pos[0]+1)\n j_lower = max(0,pos[1] - 1)\n j_upper = min(rows-1,pos[1] + 1)\n #print(i_lower,i_upper,j_upper,j_lower)\n if (0 == pos[1]) or (rows-1 == pos[1]):\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n else:\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n return neighbors\n\n\ndef getNeighbor_ordered(pos,rows,columns):\n neighbors = []\n i_lower = max(0,pos[0] - 1)\n i_upper = min(columns-1,pos[0]+1)\n j_lower = max(0,pos[1] - 1)\n j_upper = min(rows-1,pos[1] + 1)\n #print(i_lower,i_upper,j_upper,j_lower)\n\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n # order them with respect to the distance to the wall\n distance = []\n neighborsNew = []\n for i in range(len(neighbors)):\n A = neighbors[i]\n dist = abs(A[0] - 0) * abs(A[0] - (rows - 1)) * abs(A[1] - 0) * abs(A[1] - (columns - 1))\n distance.append([dist, i])\n distance = (sorted(distance, key = lambda length:length[0]))\n for i in range(len(distance)):\n idx = distance[i][1]\n neighborsNew.append(neighbors[idx])\n return(neighborsNew)\n\ndef getNeighbor_newnew(pos,rows,columns,idx,sourceB):\n # if the neighbors contain the sourcenode, add it first\n neighbors = []\n i_lower = max(0,pos[0] - 1) # up\n i_upper = min(rows-1,pos[0]+1) # down\n j_lower = max(0,pos[1] - 1) # left\n j_upper = min(columns-1,pos[1] + 1) #right\n\n\n neighborsNew = []\n if (j_upper != pos[1]):\n neighbors.append([pos[0],j_upper])\n if (j_lower != pos[1]):\n neighbors.append([pos[0],j_lower])\n if (i_lower != pos[0]):\n neighbors.append([i_lower,pos[1]])\n if (i_upper != pos[0]):\n neighbors.append([i_upper,pos[1]])\n for i in range(len(neighbors)):\n if neighbors[i] == sourceB[idx]:\n neighbors[0],neighbors[i] = neighbors[i],neighbors[0]\n return(neighbors)\n\ndef heuristic(A,B,rows,columns):\n distance = []\n for i in range(len(A)):\n dist = abs(A[i][0] - 0) * abs(A[i][0] - (rows-1)) * abs(A[i][1] - 0) * abs(A[i][1]- (columns - 1))\n distance.append([dist,i])\n 
return(sorted(distance, key = lambda length:length[0]))\n\ndef ordering (idx,values,sourceA,sourceB,puzzle,rows,columns):\n order = []\n for i in range(idx+1,len(values)):\n A = sourceA[i]\n B = sourceB[i]\n neighborA = getNeighbor(A,rows,columns)\n neighborB = getNeighbor(B,rows,columns)\n count = 0\n for j in range(len(neighborA)):\n neighborAx = neighborA[j][0]\n neighborAy = neighborA[j][1]\n if puzzle[neighborAx][neighborAy] == \"_\":\n count += 1\n for j in range(len(neighborB)):\n neighborBx = neighborB[j][0]\n neighborBy = neighborB[j][1]\n if puzzle[neighborBx][neighborBy] == \"_\":\n count += 1\n order.append([count,i])\n order = sorted(order, key = lambda length:length[0])\n ordered_values = []\n ordered_sourceA = []\n ordered_sourceB = []\n for i in range(idx+1):\n ordered_values.append(values[i])\n ordered_sourceA.append(sourceA[i])\n ordered_sourceB.append(sourceB[i])\n for i in range(idx+1,len(values)):\n index = order[i - idx - 1][1]\n ordered_values.append(values[index])\n ordered_sourceA.append(sourceA[index])\n ordered_sourceB.append(sourceB[index])\n return ordered_values,ordered_sourceA,ordered_sourceB\n\n\n\ndef main():\n [puzzle, rows, columns, left, values,sourceA, sourceB] = read_Puzzle.generatePuzzle()\n print(puzzle)\n print(values)\n center = [math.floor(rows/2),math.floor(columns/2)]\n #order = heuristic(sourceA,sourceB,rows,columns)\n #order = order[::-1]\n idx = 0\n # for i in range(20000):\n # check2(puzzle,idx,values,sourceA,sourceB,rows,columns)\n # print(\"done\")\n Fordumb = []\n for i in range(len(values)):\n Fordumb.append([values[i],sourceA[i],sourceB[i]])\n shuffle(Fordumb)\n values_for_dumb = []\n sourceA_for_dumb = []\n sourceB_for_dumb = []\n for i in range(len(values)):\n values_for_dumb.append(Fordumb[i][0])\n sourceA_for_dumb.append(Fordumb[i][1])\n sourceB_for_dumb.append((Fordumb[i][2]))\n #order = [[0,0],[1,1],[2,2],[3,3],[4,4],[5,5],[6,6]]\n #pos_now = sourceA[order[idx][1]]\n pos_now = sourceA[0]\n # global count\n # count = 0\n #result = freeFlowSmart(puzzle,rows,columns,left, values,sourceA,sourceB,idx, [pos_now],order)\n #ordered_values,ordered_A,ordered_B = ordering(-1, values, sourceA, sourceB, puzzle, rows, columns)\n #pos_now = ordered_A[idx]\n start = time.time()\n result = freeFlowSmart(puzzle, rows, columns, left, values, sourceA, sourceB, idx, [pos_now])\n #result = freeFlowEcSmart(puzzle, rows, columns, left, values, sourceA, sourceB, idx, [pos_now])\n #result = freeFlowDumb(puzzle, rows, columns, left, values_for_dumb, sourceA_for_dumb, sourceB_for_dumb, idx, [sourceA_for_dumb[idx]])\n end = time.time()\n print(end-start)\n for i in range(rows):\n print(result[i])\n\nif __name__ == \"__main__\":\n main()", "sub_path": "Assignment2_Search_CSP/part1.py", "file_name": "part1.py", "file_ext": "py", "file_size_in_byte": 20961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "copy.deepcopy", "line_number": 23, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 42, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 211, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 240, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 249, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 333, "usage_type": "call"}, {"api_name": "read_Puzzle.generatePuzzle", "line_number": 540, "usage_type": "call"}, 
{"api_name": "math.floor", "line_number": 543, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 553, "usage_type": "call"}, {"api_name": "time.time", "line_number": 569, "usage_type": "call"}, {"api_name": "time.time", "line_number": 573, "usage_type": "call"}]}
+{"seq_id": "435596602", "text": "from rdflib import Graph, BNode, Literal, URIRef, RDFS, RDF, plugin\nfrom rdflib.store import Store\nimport os\n\ndef test1():\n store = plugin.get('SQLAlchemy', Store)(\n identifier=URIRef(\"rdflib_test\"),\n configuration=Literal(\"sqlite:///%(here)s/development.sqlite\" % {\"here\": os.getcwd()}))\n g = Graph(store)\n statementId = BNode()\n print(len(g))\n g.add((statementId, RDF.type, RDF.Statement))\n g.add((statementId, RDF.subject, URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))\n g.add((statementId, RDF.predicate, RDFS.label))\n g.add((statementId, RDF.object, Literal(\"Conjunctive Graph\")))\n print(len(g))\n for s, p, o in g:\n print(type(s))\n\n for s, p, o in g.triples((None, RDF.object, None)):\n print(o)\n\n g.remove((statementId, RDF.type, RDF.Statement))\n print(len(g))\n os.unlink(\"%(here)s/development.sqlite\" % {\"here\": os.getcwd()})\n\n", "sub_path": "test/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rdflib.plugin.get", "line_number": 6, "usage_type": "call"}, {"api_name": "rdflib.store.Store", "line_number": 6, "usage_type": "argument"}, {"api_name": "rdflib.plugin", "line_number": 6, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 7, "usage_type": "call"}, {"api_name": "rdflib.Literal", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "rdflib.Graph", "line_number": 9, "usage_type": "call"}, {"api_name": "rdflib.BNode", "line_number": 10, "usage_type": "call"}, {"api_name": "rdflib.RDF.type", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 12, "usage_type": "name"}, {"api_name": "rdflib.RDF.Statement", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rdflib.RDF.subject", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 13, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 13, "usage_type": "call"}, {"api_name": "rdflib.RDF.predicate", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 14, "usage_type": "name"}, {"api_name": "rdflib.RDFS.label", "line_number": 14, "usage_type": "attribute"}, {"api_name": "rdflib.RDFS", "line_number": 14, "usage_type": "name"}, {"api_name": "rdflib.RDF.object", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 15, "usage_type": "name"}, {"api_name": "rdflib.Literal", "line_number": 15, "usage_type": "call"}, {"api_name": "rdflib.RDF.object", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 20, "usage_type": "name"}, {"api_name": "rdflib.RDF.type", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rdflib.RDF", "line_number": 23, "usage_type": "name"}, {"api_name": "rdflib.RDF.Statement", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "21765081", "text": "from django.shortcuts import render\nfrom message.models import Message\nfrom django.http import Http404\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\n@login_required\ndef log_audit(request):\n '''\n 审计日志\n '''\n if request.user.is_superuser:\n logs = Message.objects.all()[:300]\n\n if request.method == 'GET':\n if 'aid' in request.GET:\n aid = request.get_full_path().split('=')[1]\n log_detail = Message.objects.filter(id=aid)\n data = {\n 'log_detail': log_detail,\n 'page_name': '日志明细'\n }\n return render(request, 'message/log_audit_detail.html',data)\n data = {\n 'all_logs':logs,\n 'page_name':'审计日志'\n }\n\n return render(request, 'message/log_audit.html', data)\n else:\n raise Http404\n", "sub_path": "message/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "message.models.Message.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "message.models.Message.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "message.models.Message", "line_number": 14, "usage_type": "name"}, {"api_name": "message.models.Message.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "message.models.Message.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "message.models.Message", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 8, "usage_type": "name"}]}
+{"seq_id": "395634859", "text": "#!/usr/bin/env python3\n# encoding:utf-8\n\nimport requests\nfrom pyquery import PyQuery as pq\nfrom fake_useragent import UserAgent\nfrom urllib.parse import quote\n\nfrom database import Database\n\n\nclass Zongheng():\n\n def __init__(self):\n self.db = Database()\n\n def get_book_information(self, book):\n name, source_id, *_, search = book\n book, author, category = [{}, {}, {}]\n print('search {} at {}'.format(name, search))\n # http://search.zongheng.com/search/all/永夜君王/1.html\n url = search + 'all/' + quote(name) + '/1.html'\n headers = {'Referer': search, 'User-Agent': UserAgent().random}\n result = pq(requests.get(url, headers=headers).text)('.search_text').eq(0)\n if pq(result)('a').eq(0).text().replace(' ','') == name:\n print('book {} found'.format(name))\n book['name'] = name\n book['source_id'] = source_id\n book['book_link'] = pq(result)('a').eq(0).attr('href')\n book['toc_link'] = pq(result)('.search_oprate')('.a_un').eq(1).attr('href')\n author['name'] = pq(result)('a').eq(1).text()\n author['link'] = pq(result)('a').eq(1).attr('href')\n category['name'] = pq(result)('a').eq(2).text()\n print(book, author, category)\n else:\n print(\"book {} not found\".format(name))\n return book, author, category\n\n def get_chapters(self, book):\n book_id, name, toc_link, source_id, source_name = book\n chapters = []\n print('get chapter list for {}'.format(name))\n headers = {'User-Agent': UserAgent().random}\n results = pq(requests.get(toc_link, headers=headers).text)('#chapterListPanel')('.chapterBean')\n for r in results:\n chapter, chapter_list = [{}, {}]\n chapter['name'] = pq(r)('td').attr('chaptername')\n chapter['book_id'] = book_id\n chapter['is_new'] = True\n chapter['update_time'] = pq(r)('td').attr('updatetime')\n chapter['word_num'] = pq(r)('td').attr('wordnum')\n chapter_list['source_id'] = source_id\n chapter_list['link'] = pq(r)('a').attr('href')\n chapters.append(chapter)\n\n def update_chapters(self, book):\n id, name, link, source = book", "sub_path": "cashew/lidl/zongheng.py", "file_name": "zongheng.py", "file_ext": "py", "file_size_in_byte": 2292, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "database.Database", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 22, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 23, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 25, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 29, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 30, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 31, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 32, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 33, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 43, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 47, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 50, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 51, "usage_type": "call"}, {"api_name": 
"pyquery.PyQuery", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "562241773", "text": "import pygame\r\nimport random as r\r\nimport math\r\nfrom pygame import mixer\r\n\r\n# Initialization of package\r\npygame.init()\r\n\r\n# screen creation\r\nscreen = pygame.display.set_mode((800, 600))\r\n\r\n# Title and icon\r\npygame.display.set_caption(\"Space Invaders\")\r\nicon = pygame.image.load(\"space.png\")\r\npygame.display.set_icon(icon)\r\n\r\n# Background music\r\nmixer.music.load('background.wav')\r\nmixer.music.play(-1)\r\n\r\n# Background\r\nbackground = pygame.image.load('background.png')\r\n\r\n# Player\r\nplayerImg = pygame.image.load(\"player.png\")\r\nplayerX = 370\r\nplayerY = 480\r\nplayerX_change = 0\r\n\r\n# Enemy\r\nenemyImg = list()\r\nenemyX = list()\r\nenemyY = list()\r\nenemyX_change = list()\r\nenemyY_change = list()\r\nnum_of_enemies = 6\r\nfor i in range(num_of_enemies):\r\n enemyImg.append(pygame.image.load(\"enemy.png\"))\r\n enemyX.append(r.randint(0, 735))\r\n enemyY.append(r.randint(50, 150))\r\n enemyX_change.append(6)\r\n enemyY_change.append(55)\r\n\r\n# Bullet\r\nbulletImg = pygame.image.load(\"bullet.png\")\r\nbulletX = 0\r\nbulletY = 480\r\nbulletX_change = 0\r\nbulletY_change = 8\r\n\"\"\" Ready - You can't see bullet on the screen\r\n Fire -- The bullet is currently moving\r\n\"\"\"\r\nbullet_state = \"ready\"\r\n\r\n# Score\r\nscore_value = 0\r\nfont = pygame.font.Font('freesansbold.ttf', 32)\r\n\r\ntextX = 10\r\ntextY = 10\r\n\r\n# Game over text\r\nover_font = pygame.font.Font('freesansbold.ttf', 64)\r\n\r\n\r\ndef show_score(x, y):\r\n score = font.render(\"Score: \" + str(score_value), True, (255, 255, 255))\r\n screen.blit(score, (x, y))\r\n\r\n\r\ndef game_over_text():\r\n over_text = over_font.render('GAME OVER', True, (255, 255, 255))\r\n screen.blit(over_text, (200, 250))\r\n\r\n\r\ndef player(x, y):\r\n screen.blit(playerImg, (x, y))\r\n\r\n\r\ndef enemy(x, y, i):\r\n screen.blit(enemyImg[i], (x, y))\r\n\r\n\r\ndef fire_bullet(x, y):\r\n global bullet_state\r\n bullet_state = \"fire\"\r\n screen.blit(bulletImg, (x + 16, y + 10))\r\n\r\n\r\ndef is_collision(ex, ey, bx, by):\r\n distance = math.sqrt((math.pow(ex - bx, 2)) + (math.pow(ey - by, 2)))\r\n # print(distance)\r\n if distance < 27:\r\n return True\r\n return False\r\n\r\n\r\n# Game Loop\r\nrunning = True\r\n\r\nwhile running:\r\n # RGB - red, green, blue\r\n screen.fill((255, 255, 255))\r\n screen.blit(background, (0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n # to check for keystroke whether it's right or left\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n playerX_change = -5\r\n if event.key == pygame.K_RIGHT:\r\n playerX_change = 5\r\n if event.key == pygame.K_SPACE:\r\n if bullet_state is \"ready\":\r\n bullet_sound = mixer.Sound('laser.wav')\r\n bullet_sound.play()\r\n bulletX = playerX\r\n fire_bullet(bulletX, bulletY)\r\n\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n playerX_change = 0\r\n\r\n playerX += playerX_change\r\n\r\n if playerX <= 0:\r\n playerX = 0\r\n elif playerX > 736:\r\n playerX = 736\r\n\r\n for i in range(num_of_enemies):\r\n\r\n if enemyY[i] >= 350:\r\n for j in range(num_of_enemies):\r\n enemyY[j] = 1000\r\n game_over_text()\r\n break\r\n enemyX[i] += enemyX_change[i]\r\n if enemyX[i] <= 0:\r\n enemyX_change[i] = 2\r\n enemyY[i] += enemyY_change[i]\r\n elif enemyX[i] >= 736:\r\n enemyX_change[i] = -2\r\n enemyY[i] += enemyY_change[i]\r\n\r\n # Collision\r\n collision = 
is_collision(enemyX[i], enemyY[i], bulletX, bulletY)\r\n if collision:\r\n bulletY = 480\r\n bullet_state = \"ready\"\r\n score_value += 10\r\n enemyX[i] = r.randint(0, 735)\r\n enemyY[i] = r.randint(50, 150)\r\n\r\n explosion_sound = mixer.Sound('explosion.wav')\r\n explosion_sound.play()\r\n enemy(enemyX[i], enemyY[i], i)\r\n # Bullet movement\r\n\r\n if bulletY <= 0:\r\n bulletY = 480\r\n bullet_state = \"ready\"\r\n\r\n if bullet_state is \"fire\":\r\n fire_bullet(bulletX, bulletY)\r\n bulletY -= bulletY_change\r\n\r\n player(playerX, playerY)\r\n show_score(textX, textY)\r\n pygame.display.update()\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 18, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 19, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 63, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 91, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 111, "usage_type": 
"attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 117, "usage_type": "name"}, {"api_name": "pygame.KEYUP", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 123, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 154, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 155, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 157, "usage_type": "name"}, {"api_name": "pygame.display.update", "line_number": 172, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 172, "usage_type": "attribute"}]}
+{"seq_id": "337907518", "text": "import numpy\nimport talib\nfrom logic import MarketTrend\nfrom logic import Indicator, validate_datapoint\nfrom logic.candle import Candle\n\n\nclass TakeProfit(Indicator):\n\n def __init__(self, atr_period_length=7):\n super(TakeProfit, self).__init__()\n self.period = atr_period_length\n self._high = []\n self._low = []\n self._close = []\n self.position_type = MarketTrend.ENTER_LONG\n self.current_takeprofit_price = 0.0\n self.state = MarketTrend.NO_STOP\n\n def GetState(self):\n return self.state\n\n def seen_enough_data(self):\n return self.period <= len(self._high)\n\n def AmountOfDataStillMissing(self):\n return max(0, self.period - len(self._high))\n\n def Tickerupdate(self, datapoint):\n if not validate_datapoint(datapoint):\n return\n\n # Check if it is time to do a stop loss trade\n if (self.current_takeprofit_price > 0.0):\n if (self.position_type == MarketTrend.ENTER_LONG):\n if (datapoint[\"value\"] > self.current_takeprofit_price):\n # Should sell Long position\n self.state = MarketTrend.STOP_LONG\n self.current_takeprofit_price = 0.0\n elif (self.position_type == MarketTrend.ENTER_SHORT):\n if (datapoint[\"value\"] < self.current_takeprofit_price):\n # Should buy back short position\n self.state = MarketTrend.STOP_SHORT\n self.current_takeprofit_price = 0.0\n\n def update(self, datapoint):\n\n if not isinstance(datapoint, Candle):\n self.Tickerupdate(datapoint)\n return\n\n self._high.append(datapoint.high)\n self._low.append(datapoint.low)\n self._close.append(datapoint.close)\n\n if (len(self._high) > self.period):\n self._close.pop(0)\n self._low.pop(0)\n self._high.pop(0)\n\n def SetTakeProfit(self, price, position_type=MarketTrend.ENTER_LONG):\n if (position_type != MarketTrend.ENTER_LONG and\n position_type != MarketTrend.ENTER_SHORT):\n return\n if (price <= 0.0):\n return\n self.position_type = position_type\n self.current_takeprofit_price = price\n self.state = MarketTrend.NO_STOP\n\n def GetPrice(self, position_type=MarketTrend.ENTER_LONG):\n\n if (not self.seen_enough_data()):\n return numpy.nan\n\n high = numpy.array(self._high, dtype=float)\n low = numpy.array(self._low, dtype=float)\n close = numpy.array(self._close, dtype=float)\n ATR = talib.ATR(high, low, close, timeperiod=self.period - 1)[-1]\n takeprofit_price = self._close[-1]\n\n if (position_type == MarketTrend.ENTER_LONG):\n takeprofit_price += 1.0 * ATR\n elif (position_type == MarketTrend.ENTER_SHORT):\n takeprofit_price -= 1.0 * ATR\n else:\n takeprofit_price = numpy.nan\n\n return takeprofit_price\n\n def CancelTakeProfit(self):\n self.state = MarketTrend.NO_STOP\n self.current_takeprofit_price = 0.0\n\n def IsSet(self):\n return self.current_takeprofit_price != 0.0\n", "sub_path": "logic/takeprofit.py", "file_name": "takeprofit.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logic.Indicator", "line_number": 8, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 16, "usage_type": "name"}, {"api_name": "logic.MarketTrend.NO_STOP", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 18, "usage_type": "name"}, {"api_name": "logic.validate_datapoint", "line_number": 30, "usage_type": "call"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 35, "usage_type": "attribute"}, 
{"api_name": "logic.MarketTrend", "line_number": 35, "usage_type": "name"}, {"api_name": "logic.MarketTrend.STOP_LONG", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 38, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_SHORT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 40, "usage_type": "name"}, {"api_name": "logic.MarketTrend.STOP_SHORT", "line_number": 43, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 43, "usage_type": "name"}, {"api_name": "logic.candle.Candle", "line_number": 48, "usage_type": "argument"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 61, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 61, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 62, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 62, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_SHORT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 63, "usage_type": "name"}, {"api_name": "logic.MarketTrend.NO_STOP", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 69, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 71, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "talib.ATR", "line_number": 79, "usage_type": "call"}, {"api_name": "logic.MarketTrend.ENTER_LONG", "line_number": 82, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 82, "usage_type": "name"}, {"api_name": "logic.MarketTrend.ENTER_SHORT", "line_number": 84, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 87, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend.NO_STOP", "line_number": 92, "usage_type": "attribute"}, {"api_name": "logic.MarketTrend", "line_number": 92, "usage_type": "name"}]}
+{"seq_id": "651752315", "text": "from PySide2.QtWidgets import QTextEdit\nfrom PySide2.QtGui import QSyntaxHighlighter, QColor, QTextCharFormat#, QFont\nfrom PySide2.QtCore import QRegExp\n\nfrom shiboken2 import wrapInstance\nfrom maya.OpenMayaUI import MQtUtil \n\n\n# dependancies: tk_scriptEditorOutput.mel\n\nclass SH(QSyntaxHighlighter):\n\t'''\n\tSyntax Highlight class, used by all SK_*_Codes SHs\n\t:param parent: parent's widget\n\t'''\n\tdef __init__(self, parent):\n\t\tQSyntaxHighlighter.__init__(self, parent) #inherit\n\t\tself.parent = parent #define parent explicitly\n\t\t\n\tdef highlightBlock(self, text):\n\t\t# Derived from Qt function, used to apply color-syntaxing to text\n\t\t# :param text: text input\n\t\t\n\t\trules = [(QColor( 90, 90, 90), r\"^(//|#).+$\"), #grey 90, 90, 90\n\t\t\t\t (QColor(205, 200, 120), r\"^(//|#) Warning.+$\"), #yellow 205, 200, 120\n\t\t\t\t (QColor(165, 75, 75), r\"^(//|#).+Error.+$\"), #red 165, 75, 75\n\t\t\t\t (QColor(115, 215, 150), r\"^(//|#).+Result.+$\")] #green 115, 215, 150\n\t\t# loop through rules\n\t\tfor color, pattern in rules:\n\t\t\tkeyword = QTextCharFormat()\n\t\t\tkeyword.setForeground(color)\n\t\t\t# get regexp pattern\n\t\t\texpression = QRegExp(pattern)\n\t\t\tindex = expression.indexIn(text)\n\t\t\t# loop until all matches are done\n\t\t\twhile index >= 0:\n\t\t\t\tlength = expression.matchedLength()\n\t\t\t\t# format text with current formatting\n\t\t\t\tself.setFormat(index, length, keyword)\n\t\t\t\tindex = expression.indexIn(text, index + length)\n\t\tself.setCurrentBlockState(0)\n\n\ndef wrap():\n\ti=1\n\twhile i:\n\t\ttry:\n\t\t\tse_edit = wrapInstance(long(MQtUtil.findControl('cmdScrollFieldReporter%i' %i)), QTextEdit)\n\t\t\tbreak\n\t\texcept TypeError:\n\t\t\ti+=1\n\tsyntax_highlighter = SH(se_edit)\n\n\t#untested. send to $tk_cmdScrollFieldReporter explicitly. 
used in place of above code.\n\t# cmdScrollFieldReporter = \"$tk_cmdScrollFieldReporter\"\n\t# se_edit = wrapInstance(long(MQtUtil.findControl(cmdScrollFieldReporter)), QTextEdit)\n\t# syntax_highlighter = SH(se_edit)\n \n\n\n\t#unused from original script\n\t# # try:\n\t# # syntax_highlighter.deleteLater()\n\t# # except:\n\t# # pass", "sub_path": "maya/scriptEditorOutputTextHighlighting.py", "file_name": "scriptEditorOutputTextHighlighting.py", "file_ext": "py", "file_size_in_byte": 2041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PySide2.QtGui.QSyntaxHighlighter", "line_number": 11, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QSyntaxHighlighter.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QSyntaxHighlighter", "line_number": 17, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 24, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 25, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 26, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 27, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QTextCharFormat", "line_number": 30, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QRegExp", "line_number": 33, "usage_type": "call"}, {"api_name": "shiboken2.wrapInstance", "line_number": 48, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QTextEdit", "line_number": 48, "usage_type": "argument"}, {"api_name": "maya.OpenMayaUI.MQtUtil.findControl", "line_number": 48, "usage_type": "call"}, {"api_name": "maya.OpenMayaUI.MQtUtil", "line_number": 48, "usage_type": "name"}]}
+{"seq_id": "187910194", "text": "#!/usr/bin/python3\n\nfrom sys import stdin\nfrom itertools import repeat\n\n\ndef fold(a, b):\n a.append((float('inf'),'inf'))\n b.append((float('inf'),'inf'))\n i = 0\n j = 0\n r = []\n for k in range(len(a)+len(b)-2):\n if (a[i][0] <= b[j][0]):\n r.append(a[i])\n i = i + 1\n elif (a[i][0] > b[j][0]):\n r.append(b[j])\n j = j + 1\n return r\n\ndef merge(decks):\n # SKRIV DIN KODE HER\n result = []\n for k in decks:\n result = (fold(result, k))\n a = []\n for t in result:\n a.append(t[1])\n return ''.join(a)\n \n \n\n\ndef main():\n # Read input.\n decks = []\n for line in stdin:\n (index, csv) = line.strip().split(':')\n deck = list(zip(map(int, csv.split(',')), repeat(index)))\n decks.append(deck)\n # Merge the decks and print result.\n print(merge(decks))\n\n\nif __name__ == \"__main__\":\n main()\n\n", "sub_path": "Oving3/kortstokker.py", "file_name": "kortstokker.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.stdin", "line_number": 38, "usage_type": "name"}, {"api_name": "itertools.repeat", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "248478467", "text": "import pygame\r\n\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Lift(Sprite):\r\n\r\n\t\"\"\"A class to manage bullets fired from the ship\"\"\"\r\n\r\n\tdef __init__(self, ai_settings, screen, nuke):\r\n\r\n\t\t\"\"\"Create a bullet object at the ship's current position.\"\"\"\r\n\r\n\t\tsuper(Lift, self).__init__()\r\n\r\n\t\tself.screen = screen\r\n\t\tself.image = pygame.image.load('images/turretlift.gif')\r\n\t\t# Create a bullet rect at (0, 0) and then set correct position.\r\n\r\n\t\tself.rect = pygame.Rect(0, 0, 100, 100)\r\n\r\n\t\tself.rect.x = 200\r\n\t\tself.rect.y = (nuke.rect.y + nuke.rect.height)\r\n\r\n\r\n\t\t# Store the bullet's position as a decimal value.\r\n\r\n\t\t\r\n\r\n\t\tself.color = ai_settings.ground_color\r\n\r\n\t\r\n\r\n\t\t\r\n\r\n\tdef blitme(self, nuke):\r\n\r\n\t\t\"\"\"Draw the bullet to the screen.\"\"\"\r\n\r\n\t\tself.rect.y = (nuke.rect.y + nuke.rect.height)\r\n\t\tself.screen.blit(self.image, self.rect)\r\n\r\n\r\n", "sub_path": "Zombies/turretlift.py", "file_name": "turretlift.py", "file_ext": "py", "file_size_in_byte": 837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "574672433", "text": "# import pickle\n\n# boxes = pickle.load(open('/home/junjie/Code/faster-rcnn-gcn/output/res101/voc_2007_test/rec_bgul/detections.pkl', 'rb'))\n# re_class_boxes = pickle.load(open('/home/junjie/Code/faster-rcnn-gcn/output/res101/voc_2007_test/origin/detections.pkl', 'rb'))\n\n\n# pass\n\n\nimport json\ncoco = json.load(open('/home/junjie/Code/Datasets/COCO/annotations/instances_trainval2014.json', 'rb'))\n\nclass_name = [ 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor']\n\nclass_name = [ 'airplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'dining table', 'dog', 'horse',\n 'motorcycle', 'person', 'potted plant',\n 'sheep', 'couch', 'train', 'tv']\n\n\ni = 0\nindex = []\nfor cat in coco['categories']:\n if cat['name'] in class_name:\n print(cat['name'])\n i += 1\n index.append(cat['id'])\n \nprint(i)\n\nprint(index)\npass", "sub_path": "utils/read_output.py", "file_name": "read_output.py", "file_ext": "py", "file_size_in_byte": 1205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "382689673", "text": "from keras.models import load_model\nfrom keras.models import model_from_json\nfrom keras.applications import imagenet_utils\nfrom keras.preprocessing.image import img_to_array\nfrom sklearn.preprocessing import scale\nimport os, sys\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom keras.applications import imagenet_utils\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# dimensions of our images\nimg_width, img_height = 224, 224\ncatsPredicted, birdsPredicted, NothingPredicted = 0, 0, 0\nfolder =['/home/ioannis/Desktop/foto/cats',\n '/home/ioannis/Desktop/foto/birds',\n '/home/ioannis/Desktop/foto/nothing']\n\n\n\n#load the model we created\njson_file = open('/home/ioannis/Desktop/model_l2.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weight into model\nloaded_model.load_weights(\"/home/ioannis/Desktop/model_l2.h5\")\nprint(\"\\nModel successfully loaded from disk! \")\n\n#print model summary\nprint(loaded_model.summary())\n\n#Predict image\ndef predict_image(image):\n img =image.load_img(image, target_size=(224, 224))\n img = np.asarray(img,'float32')/255.0\n image = np.expand_dims(img, axis = 0)\n preds = loaded_model.predict(image)\n print(\"\\rImage is : \" + image)\n #pred_classes = np.argmax(preds)\n print(preds)\n print(pred_classes)\n\nfor subfolder in folder :\n catsPredicted, birdsPredicted, NothingPredicted = 0, 0, 0\n print(\"\\nPredicting\",subfolder , \"images\")\n for filename in os.listdir(subfolder):\n #print(filename)\n x = subfolder +'/'+filename\n img =image.load_img(x, target_size=(224, 224))\n img1 = np.asarray(img,'float32')/255.0\n image2 = np.expand_dims(img1, axis = 0)\n preds = loaded_model.predict(image2)\n birdsPredicted +=preds[0,0]\n catsPredicted += preds[0,1]\n NothingPredicted += preds[0,2]\n catmeans = catsPredicted /50\n birdsmean = birdsPredicted /50\n nothingmean = NothingPredicted /50\n allmeans = [round(catmeans, 2) , round(birdsmean, 2), round(nothingmean, 2)]\n print(' Cat | Bird | Nothing')\n print(allmeans)\n", "sub_path": "Neural_Networks/Deep_Learning/Part_2/load_model.py", "file_name": "load_model.py", "file_ext": "py", "file_size_in_byte": 2172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "keras.models.model_from_json", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 36, "usage_type": "argument"}, {"api_name": "numpy.asarray", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 39, "usage_type": "argument"}, {"api_name": "keras.preprocessing.image", "line_number": 40, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "508886699", "text": "# Import required libraries\nimport pandas as pd\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\n\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',\n\t\t\t\t\t\t\t\t\t\tstyle={'textAlign': 'center', 'color': '#503D36',\n\t\t\t\t\t\t\t\t\t\t\t 'font-size': 40}),\n\t\t\t\t\t\t\t\t# TASK 1: Add a dropdown list to enable Launch Site selection\n\t\t\t\t\t\t\t\t# The default select value is for ALL sites\n\t\t\t\t\t\t\t\tdcc.Dropdown(\n\t\t\t\t\t\t\t\t\tid='site-dropdown',\n\t\t\t\t\t\t\t\t\toptions=[\n\t\t\t\t\t\t\t\t\t\t{'label':'ALL', 'value':'ALL'},\n\t\t\t\t\t\t\t\t\t\t{'label':'CCAFS LC-40', 'value':'CCAFS LC-40'},\n\t\t\t\t\t\t\t\t\t\t{'label':'CCAFS SLC-40', 'value':'CCAFS SLC-40'},\n\t\t\t\t\t\t\t\t\t\t{'label':'KSC LC-39A', 'value':'KSC LC-39A'},\n\t\t\t\t\t\t\t\t\t\t{'label':'VAFB SLC-4E', 'value':'VAFB SLC-4E'}\n\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\tvalue='ALL',\n\t\t\t\t\t\t\t\t\tplaceholder='Select a Launch Site here',\n\t\t\t\t\t\t\t\t\tsearchable=True\n\t\t\t\t\t\t\t\t),\n\n\t\t\t\t\t\t\t\t# TASK 2: Add a pie chart to show the total successful launches count for all sites\n\t\t\t\t\t\t\t\t# If a specific launch site was selected, show the Success vs. Failed counts for the site\n\t\t\t\t\t\t\t\thtml.Div(dcc.Graph(id='success-pie-chart')),\n\t\t\t\t\t\t\t\thtml.Br(),\n\n\t\t\t\t\t\t\t\thtml.P(\"Payload range (Kg):\"),\n\t\t\t\t\t\t\t\t# TASK 3: Add a slider to select payload range\n\t\t\t\t\t\t\t\tdcc.RangeSlider(\n\t\t\t\t\t\t\t\t\tid='payload-slider',\n\t\t\t\t\t\t\t\t\tmin=0,\n\t\t\t\t\t\t\t\t\tmax=10000,\n\t\t\t\t\t\t\t\t\tstep=1000,\n\t\t\t\t\t\t\t\t\tmarks={\n\t\t\t\t\t\t\t\t\t\t0:'0',\n\t\t\t\t\t\t\t\t\t\t2500:'2500',\n\t\t\t\t\t\t\t\t\t\t5000:'5000',\n\t\t\t\t\t\t\t\t\t\t7500:'7500',\n\t\t\t\t\t\t\t\t\t\t10000:'10,000'\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tvalue=[min_payload, max_payload]\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\thtml.Br(),\n\n\t\t\t\t\t\t\t\t# TASK 4: Add a scatter chart to show the correlation between payload and launch success\n\t\t\t\t\t\t\t\thtml.Div(dcc.Graph(id='success-payload-scatter-chart')),\n\t\t\t\t\t\t\t\t])\n\n# TASK 2:\n# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output\n\n@app.callback( Output(component_id='success-pie-chart', component_property='figure'),\n\t\t\t Input(component_id='site-dropdown', component_property='value'))\n\ndef build_pie(site):\n\n\tdf_success = spacex_df[(spacex_df['class']==1)]\n\tdf_x = pd.DataFrame(df_success.groupby(['Launch Site'])['class'].value_counts())\n\tdf = spacex_df.copy()\n\n\tif site == 'ALL':\n\n\t\t#Success Counts for ALL Sites\n\t\tfig = px.pie(df_x,values=\"class\", names=\"class\", title='Success Counts for ALL Sites')\n\t\treturn fig\n\n\telse:\n\t\tddf = df[df['Launch Site']==site]\n\t\tddfg = pd.DataFrame(ddf.groupby(['Launch Site','class'])['class'].value_counts())\n\t\tfig = px.pie(ddfg, values='class', names='class', title='Succes count of '+site)\n\n\t\treturn fig\n\n# TASK 4:\n# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as 
output\n\n@app.callback(Output(component_id='success-payload-scatter-chart', component_property='figure'),\n\t\t\t [Input(component_id='site-dropdown', component_property='value'),\n\t\t\t\tInput(component_id='payload-slider', component_property='value')])\n\ndef build_scatter(site,payload):\n\n\tlow,high = (payload[0], payload[1])\n\tdf = spacex_df #copy not needed\n\t# filter your weights out here since you need to filter it whether all sites or an individual site is selected\n\tfiltered_dfa = df[df['Payload Mass (kg)'].between(low,high)]\n\n\tif site == 'ALL':\n\t\tfig = px.scatter(filtered_dfa,x=\"Payload Mass (kg)\", y=\"class\", color=\"Booster Version Category\", title='Payload vs. Outcome for All Sites')\n\telse:\n\t\t# now we can use our filtered payload weights to filter further by site in our else statement\n\t\tfiltered_dfb = filtered_dfa[filtered_dfa['Launch Site'] == site]\n\t\tfig = px.scatter(filtered_dfb,x=\"Payload Mass (kg)\", y=\"class\", color=\"Booster Version Category\", title='Payload vs. Outcome for' + site)\n\t# now we can return fig once at the end of our function since fig is what we want either way.\n\t# our if else will produce a different fig for us based on the condition, but variable name is the same\n\treturn fig\n# Run the app\nif __name__ == '__main__':\n\tapp.run_server(debug=True, use_reloader=False, port=8051)\n", "sub_path": "spacex_dash_app.py", "file_name": "spacex_dash_app.py", "file_ext": "py", "file_size_in_byte": 4338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 15, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 18, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 23, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 39, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 39, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 40, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 42, "usage_type": "call"}, {"api_name": "dash_core_components.RangeSlider", "line_number": 44, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 61, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 73, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 79, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 79, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 85, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 85, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 67, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 68, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 104, "usage_type": "name"}, {"api_name": "plotly.express.scatter", "line_number": 108, "usage_type": "call"}, {"api_name": "plotly.express", 
"line_number": 108, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 92, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 93, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "201415950", "text": "from django.conf import settings\nfrom django.contrib.auth import (\n REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout,\n)\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404, redirect, render, resolve_url\nfrom django.utils import translation\nfrom django.utils.http import is_safe_url\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.i18n import set_language as set_language_view\n\nfrom apps.organization.models import Location, Membership, Organization\nfrom apps.scheduling.models import Availability, BartenderAvailability, Event\n\nfrom .forms import RegisterForm\n\n\ndef _get_login_redirect_url(request, redirect_to):\n # Ensure the user-originating redirection URL is safe.\n if not is_safe_url(url=redirect_to, host=request.get_host()):\n return resolve_url(settings.LOGIN_REDIRECT_URL)\n return redirect_to\n\n\n@sensitive_post_parameters('password')\n@csrf_protect\ndef login(request):\n redirect_to = request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME, ''))\n\n if request.user.is_authenticated():\n redirect_to = _get_login_redirect_url(request, redirect_to)\n if redirect_to == request.path:\n raise ValueError(\n \"Redirection loop for authenticated user detected. Check that \"\n \"your LOGIN_REDIRECT_URL doesn't point to a login page.\"\n )\n return redirect(redirect_to)\n elif request.method == 'POST':\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n user = form.get_user()\n auth_login(request, user)\n\n # Primaire organisatie instellen\n if hasattr(user, 'profile') and user.profile.current_organization:\n request.session['organization_pk'] = user.profile.current_organization.pk\n\n # Taal instellen\n if hasattr(user, 'profile') and user.profile.current_language:\n translation.activate(user.profile.current_language)\n request.session[translation.LANGUAGE_SESSION_KEY] = user.profile.current_language\n\n if not user.first_name or not user.email:\n # User information is not complete, redirect to register page.\n return redirect(register)\n\n return redirect(_get_login_redirect_url(request, redirect_to))\n else:\n form = AuthenticationForm(request)\n\n redirect_field_name = REDIRECT_FIELD_NAME\n\n return render(request, 'general/login.html', locals())\n\n\ndef logout(request):\n auth_logout(request)\n return redirect(settings.LOGIN_URL)\n\n\n@login_required\ndef register(request):\n if request.method == 'POST':\n form = RegisterForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n return redirect('schedule')\n else:\n form = RegisterForm(instance=request.user)\n\n return render(request, 'general/register.html', locals())\n\n\n@login_required\ndef change_current_organization(request, organization):\n org = get_object_or_404(Organization, slug=organization)\n request.session['organization_pk'] = org.pk\n request.user.profile.current_organization = org\n request.user.profile.save()\n return redirect(request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME, '')))\n\n\ndef change_current_language(request):\n response = set_language_view(request)\n if hasattr(request.user, 'profile'):\n request.user.profile.current_language = request.session[translation.LANGUAGE_SESSION_KEY]\n request.user.profile.save()\n 
return response\n\n\ndef about(request):\n count = {\n 'organizations': Organization.objects.count(),\n 'users': User.objects.count(),\n 'tenders': Membership.objects.values('user_id').distinct().count(),\n 'locations': Location.objects.count(),\n 'public_locations': Location.objects.filter(is_public=True).count(),\n 'first_event': Event.objects.order_by('starts_at')[0],\n 'events': Event.objects.count(),\n 'bartender_availabilities': BartenderAvailability.objects.count(),\n 'bartender_availabilities_yes': BartenderAvailability.objects.filter(\n availability__nature__in=(Availability.ASSIGNED, Availability.YES),\n ).count(),\n 'bartender_availabilities_assigned': BartenderAvailability.objects.filter(\n availability__nature=Availability.ASSIGNED,\n ).count(),\n }\n return render(request, 'general/about.html', locals())\n\n\ndef help(request):\n return render(request, 'general/help.html')\n", "sub_path": "apps/general/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.utils.http.is_safe_url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.resolve_url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGIN_REDIRECT_URL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.translation.activate", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.translation", "line_number": 53, "usage_type": "name"}, {"api_name": "django.utils.translation.LANGUAGE_SESSION_KEY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.utils.translation", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.views.decorators.debug.sensitive_post_parameters", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_protect", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.conf.settings.LOGIN_URL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 71, "usage_type": "name"}, {"api_name": "forms.RegisterForm", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 82, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, 
{"api_name": "django.contrib.auth.decorators.login_required", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 89, "usage_type": "call"}, {"api_name": "apps.organization.models.Organization", "line_number": 89, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "django.contrib.auth.REDIRECT_FIELD_NAME", "line_number": 93, "usage_type": "argument"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 87, "usage_type": "name"}, {"api_name": "django.views.i18n.set_language", "line_number": 97, "usage_type": "call"}, {"api_name": "django.utils.translation.LANGUAGE_SESSION_KEY", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.utils.translation", "line_number": 99, "usage_type": "name"}, {"api_name": "apps.organization.models.Organization.objects.count", "line_number": 106, "usage_type": "call"}, {"api_name": "apps.organization.models.Organization.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Organization", "line_number": 106, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.count", "line_number": 107, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 107, "usage_type": "name"}, {"api_name": "apps.organization.models.Membership.objects.values", "line_number": 108, "usage_type": "call"}, {"api_name": "apps.organization.models.Membership.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Membership", "line_number": 108, "usage_type": "name"}, {"api_name": "apps.organization.models.Location.objects.count", "line_number": 109, "usage_type": "call"}, {"api_name": "apps.organization.models.Location.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Location", "line_number": 109, "usage_type": "name"}, {"api_name": "apps.organization.models.Location.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "apps.organization.models.Location.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "apps.organization.models.Location", "line_number": 110, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Event.objects.order_by", "line_number": 111, "usage_type": "call"}, {"api_name": "apps.scheduling.models.Event.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Event", "line_number": 111, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Event.objects.count", "line_number": 112, "usage_type": "call"}, {"api_name": "apps.scheduling.models.Event.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Event", "line_number": 112, "usage_type": "name"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects.count", "line_number": 113, "usage_type": "call"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability", "line_number": 113, "usage_type": "name"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects.filter", "line_number": 114, "usage_type": "call"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects", "line_number": 114, 
"usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability", "line_number": 114, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Availability.ASSIGNED", "line_number": 115, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Availability", "line_number": 115, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Availability.YES", "line_number": 115, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects.filter", "line_number": 117, "usage_type": "call"}, {"api_name": "apps.scheduling.models.BartenderAvailability.objects", "line_number": 117, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.BartenderAvailability", "line_number": 117, "usage_type": "name"}, {"api_name": "apps.scheduling.models.Availability.ASSIGNED", "line_number": 118, "usage_type": "attribute"}, {"api_name": "apps.scheduling.models.Availability", "line_number": 118, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}]}
+{"seq_id": "564557471", "text": "import os\nimport cProfile\n\nimport numpy as np\n\nfrom datetime import datetime\nfrom shutil import copyfile\nfrom random import random\nfrom math import floor\nfrom time import clock\n\nfrom actual_npylm import NestedPitmanYorLanguageModel\nfrom plot_utils import plot_line_graph\nfrom probability_utils import word_kl_divergence\n\n\nclass NestedPitmanYorLanguageModelWrapper:\n @staticmethod\n def load_text(path, avoid_spaces=False):\n \"\"\"\n Loads the text in the file located at path and returns it as a list of sentences. \n Also returns the symbol alphabet used in the text.\n The file must be structured as one sentence per line.\n\n :param path: Path to file containing text. \n :param avoid_spaces: Whether to ignore spaces in the generated alphabet.\n :return: Tuple of list of sentences and alphabet used in sentences.\n \"\"\"\n with open(path, \"r\", encoding=\"utf8\") as f:\n sentences = f.readlines()\n sentences = [x.strip() for x in sentences]\n\n alphabet = dict()\n for s in sentences:\n for ch in s:\n if ch not in alphabet:\n alphabet[ch] = 1\n\n # Remove space char if necessary.\n if avoid_spaces:\n if \" \" in alphabet:\n del alphabet[\" \"]\n\n return sentences, alphabet\n\n @staticmethod\n def get_mean_word_length(sentences):\n \"\"\"\n Computes the mean word length of the input list of sentences.\n \n :param sentences: List of sentences.\n :return: Mean word length.\n \"\"\"\n\n word_count = 0\n total_word_length = 0\n\n for s in sentences:\n total_word_length += len(s.replace(\" \", \"\"))\n word_count += len(s.split())\n\n return total_word_length / word_count\n\n @staticmethod\n def write_report(model_report_file_path, model_name, total_run_time, mean_run_time, data_symbol_count,\n mean_output_word_length, mean_input_word_length, correct_segmentation_percentage, kl_divergence):\n \"\"\"\n Writes the model report to the specified file path.\n \n :param model_report_file_path: Path to report file.\n :param model_name: Name of the model.\n :param total_run_time: Total run time.\n :param mean_run_time: Mean run time per iteration.\n :param data_symbol_count: The number of different symbols in the input data.\n :param mean_output_word_length: The mean output word length.\n :param mean_input_word_length: The mean input word length.\n :param correct_segmentation_percentage: The percentage of (absolutely) correct segmentations.\n :param kl_divergence: The KL divergence between the output and the input.\n \"\"\"\n\n s = \"\"\n s += \"Report for \" + model_name + \"\\n\\n\"\n s += \"\\nBenchmarks:\\nTotal run time: \\t\\t\\t\" + str(total_run_time) + \"\\n\"\n s += \"Mean run time: \\t\\t\\t\\t\" + str(mean_run_time) + \"\\n\"\n s += \"\\nPerformance:\\nData symbol count: \\t\\t\\t\" + str(data_symbol_count) + \"\\n\"\n s += \"Mean output word length: \\t\\t\" + str(mean_output_word_length) + \"\\n\"\n s += \"Mean input word length: \\t\\t\" + str(mean_input_word_length) + \"\\n\"\n s += \"Correct segmentation percentage: \\t\" + str(correct_segmentation_percentage) + \"\\n\"\n s += \"KL divergence: \\t\\t\\t\\t\" + str(kl_divergence) + \"\\n\"\n\n with open(model_report_file_path, \"w\") as f:\n f.write(s)\n\n @staticmethod\n def generate_plots(path, analysis_data):\n \"\"\"\n Generates plots and saves them as .png into the specified folder.\n \n :param path: Path to output folder.\n :param analysis_data: Analysis data used for generating plots. 
It must be a list with elements\n of form (iteration index, run time, KL divergence).\n \"\"\"\n\n path_to_runtime_plot = os.path.join(path, \"Runtime Plot.png\")\n path_to_kl_plot = os.path.join(path, \"KL Divergence Plot.png\")\n\n plot_line_graph(analysis_data, ((\"Iteration\", 0), (\"Runtime\", 1)), path_to_runtime_plot)\n plot_line_graph(analysis_data, ((\"Iteration\", 0), (\"KL Divergence\", 2)), path_to_kl_plot)\n\n @staticmethod\n def run_analysis(model_name, path_to_data, output_folder_path, iterations, max_word_length, analysis_frequency):\n \"\"\"\n Runs the NPYLM on the data found in the file at the provided path for the provided number\n of iterations. Generates analysis data with the frequency given. Outputs all results (including\n serialized models to a new folder created in the output_folder_path folder.\n \n :param model_name: The name of the model.\n :param path_to_data: Path to the file containing the segmented data.\n :param output_folder_path: Path to the folder to which to write analysis data.\n :param iterations: Number of iterations for which to run the sampler.\n :param max_word_length: Max number of symbols per word.\n :param analysis_frequency: The frequency at which to probe the sampler (e.g. for\n analysis_frequency = 10, it will generate analysis data\n at every 10th iteration.\n \"\"\"\n\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n print(\"Loaded data\\n\")\n\n # Create output folder.\n now = datetime.now()\n model_output_folder_name = \"[NPYLM][\" + model_name + \"]\" + \\\n \"[\" + str(now.day) + \"_\" + str(now.month) + \"]\" + \\\n \"[\" + str(now.hour) + \"_\" + str(now.minute) + \"]\"\n model_output_folder_path = os.path.join(output_folder_path, model_output_folder_name)\n if not os.path.exists(model_output_folder_path):\n os.makedirs(model_output_folder_path)\n print(\"Created output folder.\\n\")\n\n # Create serialization folder.\n serialization_folder = os.path.join(model_output_folder_path, \"Serialized Models\")\n if not os.path.exists(serialization_folder):\n os.makedirs(serialization_folder)\n print(\"Created serialization folder\\n\")\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, max_word_length)\n output_segmentation, analysis_data, total_run_time, mean_run_time = \\\n npylm.run_sampler(sentences, iterations, analysis_frequency, serialization_folder)\n print(\"Sampling finished.\\n\")\n\n # Copy data to output folder.\n data_copy_path = os.path.join(model_output_folder_path, \"Data Copy.txt\")\n copyfile(path_to_data, data_copy_path)\n print(\"Copied dataset to output folder.\\n\")\n\n # Write the output of the sampler.\n model_output_file_path = os.path.join(model_output_folder_path, \"Output.txt\")\n with open(model_output_file_path, \"w\") as f:\n f.writelines(output_segmentation)\n print(\"NPYLM output written to file.\\n\")\n\n # Generate statistics.\n data_symbol_count = len(alphabet)\n mean_output_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(output_segmentation)\n mean_input_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(sentences)\n sentences_count = len(sentences)\n correct_segmentation_percentage = sum([output_segmentation[i] == sentences[i]\n for i in range(sentences_count)]) / sentences_count\n # if analysis_data is not None:\n # kl_divergence = analysis_data[len(analysis_data)-1][2]\n # else:\n # kl_divergence = 999.9\n #\n # # Write report.\n # model_report_file_path = 
os.path.join(model_output_folder_path, \"Report.txt\")\n        # NestedPitmanYorLanguageModelWrapper.write_report(model_report_file_path, model_name, total_run_time,\n        #                                                   mean_run_time, data_symbol_count,\n        #                                                   mean_output_word_length, mean_input_word_length,\n        #                                                   correct_segmentation_percentage, kl_divergence)\n        # print(\"Analysis report written to file.\\n\")\n        #\n        # # Create plots folder.\n        # plots_output_folder_path = os.path.join(model_output_folder_path, \"Plots\")\n        # if not os.path.exists(plots_output_folder_path):\n        #     os.makedirs(plots_output_folder_path)\n        # print(\"Created plots folder.\\n\")\n        #\n        # # Generate plots.\n        # #NestedPitmanYorLanguageModelWrapper.generate_plots(plots_output_folder_path, analysis_data)\n        # print(\"Generated plots.\\n\")\n\n    @staticmethod\n    def profile_sampler(path_to_data, max_word_length):\n        \"\"\"\n        This function runs the sampler for one iteration in order to extract profiling information.\n        \n        :param path_to_data: Path to data file.\n        :param max_word_length: Max allowed word length\n        \"\"\"\n        iterations = 1\n        analysis_frequency = 20\n\n        # Load data.\n        sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n        print(\"Loaded data\\n\")\n\n        # Run sampler.\n        npylm = NestedPitmanYorLanguageModel(alphabet, max_word_length)\n        output_segmentation, analysis_data, total_run_time, mean_run_time = \\\n            npylm.run_sampler(sentences, iterations, analysis_frequency, \"abc\")\n        print(\"Sampling finished.\\n\")\n\n        # avoid dead code optimization\n        print(str(total_run_time))\n\n\n    @staticmethod\n    def resume_analysis(model_name, path_to_data, model_output_folder_path, analysis_frequency, file_index):\n        # TODO: Extract common code between this and the run_analysis function.\n        \"\"\"\n        Loads a serialized model and resumes the analysis.\n        \n        :param model_name: The name of the model.\n        :param path_to_data: Path to the file containing the segmented data.\n        :param model_output_folder_path: Path to the model output folder.\n        :param analysis_frequency: The frequency at which to probe the sampler (e.g. 
for\n analysis_frequency = 10, it will generate analysis data\n at every 10th iteration.\n :param file_index: Index of the serialization file which contains the state at which to\n resume the sampler.\n \"\"\"\n\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n print(\"Loaded data\\n\")\n\n serialization_folder_path = os.path.join(model_output_folder_path, \"Serialized Models\")\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, 0)\n output_segmentation, analysis_data, total_run_time, mean_run_time = \\\n npylm.resume_analysis(sentences, analysis_frequency, serialization_folder_path, file_index)\n print(\"Sampling finished.\\n\")\n\n # Copy data to output folder.\n data_copy_path = os.path.join(model_output_folder_path, \"Data Copy.txt\")\n copyfile(path_to_data, data_copy_path)\n print(\"Copied dataset to output folder.\\n\")\n\n # Write the output of the sampler.\n model_output_file_path = os.path.join(model_output_folder_path, \"Output.txt\")\n with open(model_output_file_path, \"w\") as f:\n f.writelines(output_segmentation)\n print(\"NPYLM output written to file.\\n\")\n\n # Generate statistics.\n data_symbol_count = len(alphabet)\n mean_output_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(output_segmentation)\n mean_input_word_length = NestedPitmanYorLanguageModelWrapper.get_mean_word_length(sentences)\n sentences_count = len(sentences)\n correct_segmentation_percentage = sum([output_segmentation[i] == sentences[i]\n for i in range(sentences_count)]) / sentences_count\n kl_divergence = analysis_data[len(analysis_data)-1][2]\n\n # Write report.\n model_report_file_path = os.path.join(model_output_folder_path, \"Report.txt\")\n NestedPitmanYorLanguageModelWrapper.write_report(model_report_file_path, model_name, total_run_time,\n mean_run_time, data_symbol_count,\n mean_output_word_length, mean_input_word_length,\n correct_segmentation_percentage, kl_divergence)\n print(\"Analysis report written to file.\\n\")\n\n # Create plots folder.\n plots_output_folder_path = os.path.join(model_output_folder_path, \"Plots\")\n if not os.path.exists(plots_output_folder_path):\n os.makedirs(plots_output_folder_path)\n print(\"Created plots folder.\\n\")\n\n # Generate plots.\n NestedPitmanYorLanguageModelWrapper.generate_plots(plots_output_folder_path, analysis_data)\n print(\"Generated plots.\\n\")\n\n @staticmethod\n def generate_output_for_serialized_model(path_to_data, path_to_model_file, path_to_output_file):\n # Load data.\n sentences, alphabet = NestedPitmanYorLanguageModelWrapper.load_text(path_to_data, True)\n\n # Run sampler.\n npylm = NestedPitmanYorLanguageModel(alphabet, 0)\n npylm._deserialize_model(path_to_model_file)\n output_segmentation = npylm.run_sampler_once(sentences, True)\n\n with open(path_to_output_file, \"w\") as f:\n f.writelines(output_segmentation)\n\n\n# NestedPitmanYorLanguageModelWrapper.run_analysis(model_name=\"Dummy Model Full Data2\",\n# path_to_data=\".\\\\mobydick.txt\",\n# output_folder_path=\".\\\\Output\",\n# iterations=1,\n# max_word_length=13,\n# analysis_frequency=20)\n\n\nNestedPitmanYorLanguageModelWrapper.profile_sampler(path_to_data=\".\\\\mobydick.txt\",\n max_word_length=13)\n\ndef fbs_sum_sample_y(r, tables):\n strength_param = 1.0\n discount_param = 1.0\n\n # This is a fast bernoulli sample + count across different trials with different parameters.\n # The parameters are given by miu = theta / (*tables[i]*d) + theta).\n miu = np.array(list(range(1, 
tables)), dtype=float)\n miu = strength_param / ((miu * discount_param) + strength_param)\n return (miu >= r).sum()\n\n\ndef sfbs_sum_sample_y(r, tables):\n a = 1.0\n b = 1.0\n\n n = floor((a/b) * ((1-r)/r))\n return min(n, tables)\n\n\ndef bernoulli_trial(mu):\n r = random()\n if r <= mu:\n return 1\n else:\n return 0\n\n\ndef bs_sum_sample_y(tables):\n strength_param = 1.0\n discount_param = 1.0\n\n sum = 0\n for i in range(1,tables+1):\n #sum += bernoulli_trial(strength_param / (strength_param + discount_param * i))\n sum += np.random.binomial(1, strength_param / (strength_param + discount_param * i))\n\n return sum\n\n\ndef compare_sampling_functions(iterations=100, tables=20, prt=None):\n sfbs_results = list()\n bs_results = list()\n sfbs_times = list()\n bs_times = list()\n for i in range(iterations):\n if prt is not None:\n print(\"\\nTest run \" + str(i))\n\n # Measure random number generation\n t_start = clock()\n r = random()\n t_end = clock()\n t_random_gen = t_end - t_start\n\n # Measure sfbs\n t_start = clock()\n result_sfbs = sfbs_sum_sample_y(r, tables)\n t_end = clock()\n t_sfbs = t_end - t_start\n\n # Measure bs\n t_start = clock()\n result_bs = bs_sum_sample_y(tables)\n t_end = clock()\n t_bs = t_end - t_start\n\n sfbs_times.append(t_sfbs)\n bs_times.append(t_bs)\n\n # Report results:\n if prt is not None:\n print(\"Times:\\tSFBS: \" + str(t_sfbs+t_random_gen) + \"\\t|\\tBS: \" + str(t_bs))\n print(\"Results:\\tSFBS: \" + str(result_sfbs) + \"\\t|\\tBS: \" + str(result_bs))\n\n # Record results for statistical analysis\n sfbs_results.append(result_sfbs)\n bs_results.append(result_bs)\n\n m_fbs = sum(sfbs_results) / len(sfbs_results)\n m_bs = sum(bs_results) / len(bs_results)\n print(\"\\nMean results:\\tSFBS: \" + str(m_fbs) + \"\\t|\\tBS: \" + str(m_bs))\n print(\"Times:\\tSFBS: \" + str(sum(sfbs_times)) + \"\\t|\\tBS: \" + str(sum(bs_times)))\n print(\"Speedup: \" + str(sum(bs_times)/sum(sfbs_times)))\n\ndef compare_means():\n its = 400\n tables = 100\n total_count1 = 0\n total_count2 = 0\n for i in range(its):\n r = random()\n total_count1 += sfbs_sum_sample_y(r, tables)\n total_count2 += bs_sum_sample_y(tables)\n\n print(\"Mean SFBS: \" + str(total_count1 / its))\n print(\"Mean BS: \" + str(total_count2 / its))\n\n", "sub_path": "npylm_wrapper.py", "file_name": "npylm_wrapper.py", "file_ext": "py", "file_size_in_byte": 17429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "plot_utils.plot_line_graph", "line_number": 107, "usage_type": "call"}, {"api_name": "plot_utils.plot_line_graph", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 144, "usage_type": "call"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path", "line_number": 253, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 278, "usage_type": "call"}, {"api_name": "actual_npylm.NestedPitmanYorLanguageModel", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 325, "usage_type": "call"}, {"api_name": "random.random", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.random.binomial", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 344, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 359, "usage_type": "call"}, {"api_name": "random.random", "line_number": 360, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 361, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 365, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 367, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 371, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 373, "usage_type": "call"}, {"api_name": "random.random", "line_number": 400, "usage_type": "call"}]}
+{"seq_id": "320890769", "text": "from keras.models import Model\nfrom keras.layers import Conv2D, Activation, Input\nfrom keras import optimizers\nfrom keras.models import load_model\nimport numpy as np\nimport scipy.misc\nimport scipy.ndimage\nimport cv2\nimport math\nimport glob\nimport matplotlib.pyplot as plt\n\nimg_shape = (32,32,1)\ninput_img = Input(shape=(img_shape))\nC1 = Conv2D(64,(9,9),padding='SAME',name='CONV1')(input_img)\nA1 = Activation('relu', name='act1')(C1)\nC2 = Conv2D(32,(1,1),padding='SAME',name='CONV2')(A1)\nA2 = Activation('relu', name='act2')(C2)\nC3 = Conv2D(1,(5,5),padding='SAME',name='CONV3')(A2)\nA3 = Activation('relu', name='act3')(C3)\nmodel = Model(input_img, A3)\nopt = optimizers.Adam(lr=0.0003)\nmodel.compile(optimizer=opt,loss='mean_squared_error')\nmodel.summary()\n\ndef modcrop(image, scale=2): #BY DEFAULT SCALE 2\n if len(image.shape) == 3:\n h, w, _ = image.shape\n h = h - np.mod(h, scale)\n w = w - np.mod(w, scale)\n image = image[0:h, 0:w, :]\n else:\n h, w = image.shape\n h = h - np.mod(h, scale)\n w = w - np.mod(w, scale)\n image = image[0:h, 0:w]\n return image\n\ndef create_LR(image,scale):\n label_ = modcrop(image, scale)\n label_ = label_ / 255.\n input_ = scipy.ndimage.interpolation.zoom(label_, (1./scale), prefilter=False)\n input_ = scipy.ndimage.interpolation.zoom(input_, (scale/1.), prefilter=False)\n return input_\n\npath = './img/yang91/'\nfiles_y = glob.glob(path + '*.*')\ntrainfiles = files_y[:60] #HERE TOTAL IMAGES ARE 91 , SO FROM 91 up to 85 used for Training\nvalfiles = files_y[60:] #HERE Above 85 used for Validation Set\nimg_size = 32\nstride = 16\nX_train = []\nY_train = []\nX_val = []\nY_val = []\n\nfrom matplotlib.pyplot import imread\n\n# Extract patch image for training\nfor file_y in trainfiles:\n tmp_y = scipy.ndimage.imread(file_y,flatten=True, mode='YCbCr').astype(np.float)\n tmp_X = create_LR(tmp_y,2) #############################################################SCALE###########\n h,w = tmp_y.shape\n for x in range(0, h-img_size+1, stride):\n for y in range(0, w-img_size+1, stride):\n sub_input = tmp_X[x:x+img_size,y:y+img_size].reshape(img_size,img_size,1)\n sub_label = tmp_y[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1)\n X_train.append(sub_input)\n Y_train.append(sub_label)\n\n# Extract patch image for validation\nfor file_y in valfiles:\n tmp_y = scipy.misc.imread(file_y,flatten=True, mode='YCbCr').astype(np.float)\n tmp_X = create_LR(tmp_y,2)###########################################################SCALE################\n h,w = tmp_y.shape\n for x in range(0, h-img_size+1, stride):\n for y in range(0, w-img_size+1, stride):\n sub_input = tmp_X[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1) # [32 x 32]\n sub_label = tmp_y[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1) # [32 x 32]\n X_val.append(sub_input)\n Y_val.append(sub_label)\n\nX_train = np.array(X_train)\nY_train = np.array(Y_train)\nX_val = np.array(X_val)\nY_val = np.array(Y_val)\n\nmodel.fit(X_train, Y_train, batch_size = 128, epochs = 30, validation_data=(X_val, Y_val))\nmodel.save('wscale2.h5')\n\nimg_o = scipy.misc.imread('./img/baby_x2_GT.png',flatten=True,mode='YCbCr').astype(np.float)\nimg = create_LR(img_o,2) #################################################################SCALE#################\nimg_size = 32\nstride = 16\nh,w = img.shape\npiece_wise = []\nfor x in range(0, h-img_size+1, stride):\n for y in range (0, w-img_size+1, stride):\n sub_input = img[x:x+img_size, y:y+img_size].reshape(img_size,img_size,1)\n 
piece_wise.append(sub_input)\ninput_ = np.asarray(piece_wise)\nsrcnn = load_model('wscale2.h5')\nhat = srcnn.predict(input_)\nimg_re = np.zeros(img.shape)\ni = 0\nfor x in range(0, h-img_size+1, stride):\n for y in range (0, w-img_size+1, stride):\n img_re[x:x+img_size, y:y+img_size] = hat[i].reshape(img_size,img_size)\n i += 1\ncv2.imwrite('restored1.bmp', img_re)\ncv2.imwrite('HR1.bmp', img_o)\nimg_save = (img*255).astype(np.uint8)\ncv2.imwrite('blurred1.bmp',img_save)\n\n#CALCULATE PSNR\noriginal = cv2.imread(\"HR1.bmp\")\nLR = cv2.imread(\"blurred1.bmp\")\ncontrast = cv2.imread(\"restored1.bmp\",1)\n\ndef psnr(img1, img2):\n mse = np.mean((img1-img2)**2)\n if mse ==0:\n return 100\n PIXEL_MAX = 255.0\n return 20* math.log10(PIXEL_MAX / math.sqrt(mse))\nd = psnr(original,contrast)\nprint(d)\n\nfig = plt.figure(figsize = (14,14), dpi = 100)\nax = plt.subplot(\"131\")\nax.imshow(original)\nax.set_title(\"GT\")\nplt.grid(0)\n\nax = plt.subplot(\"132\")\nax.imshow(LR)\nax.set_title(\"blurred_Image\")\nplt.grid(0)\n\nax = plt.subplot(\"133\")\nax.imshow(contrast)\nax.set_title(\"HR_RECONSTRUCTED\")\nplt.grid(0)\nplt.show()\n\n", "sub_path": "04.image_video_bm/SRCNN/srcnn_keras.py", "file_name": "srcnn_keras.py", "file_ext": "py", "file_size_in_byte": 4791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.layers.Input", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.mod", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage.interpolation.zoom", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage", "line_number": 42, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 42, "usage_type": "name"}, {"api_name": "scipy.misc.ndimage.interpolation.zoom", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 43, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage.imread", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.misc.ndimage", "line_number": 61, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 61, "usage_type": "attribute"}, {"api_name": "scipy.misc.misc.imread", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 73, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 73, "usage_type": "name"}, 
{"api_name": "numpy.float", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.misc.misc.imread", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 91, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 121, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 125, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}]}
+{"seq_id": "441429683", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Registrado\n\nclass AdminRegistrado(admin.ModelAdmin):\n\tlist_display = [\"__str__\", \"nombre\", \"codigo_postal\", \"timestap\", \"actualizado\"]\n\tclass Meta:\n\t\tmodel = Registrado\n\nadmin.site.register(Registrado, AdminRegistrado)\n", "sub_path": "boletin/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Registrado", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Registrado", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "219136152", "text": "'''Utilities relating to installing services\n\n************************************************************************\nFOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE\nSHOULD ALSO BE APPLIED TO sdk_install IN ANY OTHER PARTNER REPOS\n************************************************************************\n'''\nimport logging\nimport time\n\nimport dcos.cosmos\nimport dcos.errors\nimport dcos.marathon\nimport dcos.packagemanager\nimport dcos.subcommand\nimport retrying\nimport shakedown\n\nimport sdk_cmd\nimport sdk_marathon\nimport sdk_plan\nimport sdk_utils\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT_SECONDS = 15 * 60\n\n'''List of services which are currently installed via install().\nUsed by post-test diagnostics to retrieve stuff from currently running services.'''\n_installed_service_names = set([])\n\n'''List of dead agents which should be ignored when checking for orphaned resources.\nUsed by uninstall when validating that an uninstall completed successfully.'''\n_dead_agent_hosts = set([])\n\n\ndef get_installed_service_names() -> set:\n '''Returns the a set of service names which had been installed via sdk_install in this session.'''\n return _installed_service_names\n\n\n@retrying.retry(stop_max_attempt_number=3,\n retry_on_exception=lambda e: isinstance(e, dcos.errors.DCOSException))\ndef _retried_install_impl(\n package_name,\n service_name,\n expected_running_tasks,\n options={},\n package_version=None,\n timeout_seconds=TIMEOUT_SECONDS):\n '''Cleaned up version of shakedown's package_install().'''\n package_manager = dcos.packagemanager.PackageManager(dcos.cosmos.get_cosmos_url())\n pkg = package_manager.get_package_version(package_name, package_version)\n\n if package_version is None:\n # Get the resolved version for logging below\n package_version = 'auto:{}'.format(pkg.version())\n\n log.info('Installing package={} service={} with options={} version={}'.format(\n package_name, service_name, options, package_version))\n\n # Trigger package install, but only if it's not already installed.\n # We expect upstream to have confirmed that it wasn't already installed beforehand.\n if sdk_marathon.app_exists(service_name):\n log.info('Marathon app={} exists, skipping package install call'.format(service_name))\n else:\n package_manager.install_app(pkg, options)\n\n # Install CLI while package starts to install\n if pkg.cli_definition():\n log.info('Installing CLI for package={}'.format(package_name))\n dcos.subcommand.install(pkg)\n\n # Wait for expected tasks to come up\n if expected_running_tasks > 0:\n shakedown.wait_for_service_tasks_running(\n service_name, expected_running_tasks, timeout_seconds)\n\n # Wait for completed marathon deployment\n app_id = pkg.marathon_json(options).get('id')\n shakedown.deployment_wait(timeout_seconds, app_id)\n\n\ndef install(\n package_name,\n service_name,\n expected_running_tasks,\n additional_options={},\n package_version=None,\n timeout_seconds=TIMEOUT_SECONDS,\n wait_for_deployment=True,\n insert_strict_options=True):\n start = time.time()\n\n # If the package is already installed at this point, fail immediately.\n if sdk_marathon.app_exists(service_name):\n raise Exception('Service is already installed: {}'.format(service_name))\n\n if insert_strict_options and sdk_utils.is_strict_mode():\n # strict mode requires correct principal and secret to perform install.\n # see also: sdk_security.py\n options = sdk_utils.merge_dictionaries({\n 'service': {\n 'service_account': 
'service-acct',\n 'principal': 'service-acct',\n 'service_account_secret': 'secret',\n 'secret_name': 'secret'\n }\n }, additional_options)\n else:\n options = additional_options\n\n # 1. Install package, wait for tasks, wait for marathon deployment\n _retried_install_impl(\n package_name,\n service_name,\n expected_running_tasks,\n options,\n package_version,\n timeout_seconds)\n\n # 2. Wait for the scheduler to be idle (as implied by deploy plan completion and suppressed bit)\n # This should be skipped ONLY when it's known that the scheduler will be stuck in an incomplete\n # state, or if the thing being installed doesn't have a deployment plan (e.g. standalone app)\n if wait_for_deployment:\n # this can take a while, default is 15 minutes. for example with HDFS, we can hit the expected\n # total task count via FINISHED tasks, without actually completing deployment\n log.info('Waiting for package={} service={} to finish deployment plan...'.format(\n package_name, service_name))\n sdk_plan.wait_for_completed_deployment(service_name, timeout_seconds)\n\n log.info('Installed package={} service={} after {}'.format(\n package_name, service_name, shakedown.pretty_duration(time.time() - start)))\n\n global _installed_service_names\n _installed_service_names.add(service_name)\n\n\n@retrying.retry(stop_max_attempt_number=5,\n wait_fixed=5000,\n retry_on_exception=lambda e: isinstance(e, Exception))\ndef _retried_run_janitor(service_name):\n auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()\n\n cmd_list = [\"docker\", \"run\", \"mesosphere/janitor\", \"/janitor.py\",\n \"-r\", sdk_utils.get_role(service_name),\n \"-p\", service_name + '-principal',\n \"-z\", sdk_utils.get_zk_path(service_name),\n \"--auth_token={}\".format(auth_token)]\n\n sdk_cmd.master_ssh(\" \".join(cmd_list))\n\n\n@retrying.retry(stop_max_attempt_number=5,\n wait_fixed=5000,\n retry_on_exception=lambda e: isinstance(e, Exception))\ndef _retried_uninstall_package_and_wait(*args, **kwargs):\n shakedown.uninstall_package_and_wait(*args, **kwargs)\n\n\ndef _verify_completed_uninstall(service_name):\n state_summary = sdk_cmd.cluster_request('GET', '/mesos/state-summary').json()\n\n # There should be no orphaned resources in the state summary (DCOS-30314)\n orphaned_resources = 0\n ignored_orphaned_resources = 0\n service_role = sdk_utils.get_role(service_name)\n for agent in state_summary['slaves']:\n # resources should be grouped by role. 
check for any resources in our expected role:\n matching_reserved_resources = agent['reserved_resources'].get(service_role)\n if matching_reserved_resources:\n if agent['hostname'] in _dead_agent_hosts:\n # The test told us ahead of time to expect orphaned resources on this host.\n log.info('Ignoring orphaned resources on agent {}/{}: {}'.format(\n agent['id'], agent['hostname'], matching_reserved_resources))\n ignored_orphaned_resources += len(matching_reserved_resources)\n else:\n log.error('Orphaned resources on agent {}/{}: {}'.format(\n agent['id'], agent['hostname'], matching_reserved_resources))\n orphaned_resources += len(matching_reserved_resources)\n if orphaned_resources:\n log.error('{} orphaned resources (plus {} ignored) after uninstall of {}'.format(\n orphaned_resources, ignored_orphaned_resources, service_name))\n log.error(state_summary)\n raise Exception('Found {} orphaned resources (plus {} ignored) after uninstall of {}'.format(\n orphaned_resources, ignored_orphaned_resources, service_name))\n elif ignored_orphaned_resources:\n log.info('Ignoring {} orphaned resources after uninstall of {}'.format(\n ignored_orphaned_resources, service_name))\n log.info(state_summary)\n else:\n log.info('No orphaned resources for role {} were found'.format(service_role))\n\n # There should be no framework entry for this service in the state summary (DCOS-29474)\n orphaned_frameworks = [fwk for fwk in state_summary['frameworks'] if fwk['name'] == service_name]\n if orphaned_frameworks:\n log.error('{} orphaned frameworks named {} after uninstall of {}: {}'.format(\n len(orphaned_frameworks), service_name, service_name, orphaned_frameworks))\n log.error(state_summary)\n raise Exception('Found {} orphaned frameworks named {} after uninstall of {}: {}'.format(\n len(orphaned_frameworks), service_name, service_name, orphaned_frameworks))\n log.info('No orphaned frameworks for service {} were found'.format(service_name))\n\n\ndef ignore_dead_agent(agent_host):\n '''Marks the specified agent as destroyed. When uninstall() is next called, any orphaned\n resources against this agent will be logged but will not result in a thrown exception.\n '''\n _dead_agent_hosts.add(agent_host)\n log.info('Added {} to expected dead agents for resource validation purposes: {}'.format(\n agent_host, _dead_agent_hosts))\n\n\ndef uninstall(package_name, service_name):\n '''Uninstalls the specified service from the cluster, and verifies that its resources and\n framework were correctly cleaned up after the uninstall has completed. Any agents which are\n expected to have orphaned resources (e.g. 
due to being shut down) should be passed to\n ignore_dead_agent() before triggering the uninstall.\n '''\n start = time.time()\n\n log.info('Uninstalling {}'.format(service_name))\n\n try:\n _retried_uninstall_package_and_wait(package_name, service_name=service_name)\n except Exception:\n log.exception('Got exception when uninstalling {}'.format(service_name))\n raise\n\n cleanup_start = time.time()\n\n try:\n if sdk_utils.dcos_version_less_than('1.10'):\n # 1.9 and earlier: Run janitor to unreserve resources\n log.info('Janitoring {}'.format(service_name))\n _retried_run_janitor(service_name)\n else:\n # 1.10 and later: Wait for uninstall scheduler to finish and be removed by Cosmos\n log.info('Waiting for Marathon app to be removed {}'.format(service_name))\n sdk_marathon.retried_wait_for_deployment_and_app_removal(\n sdk_marathon.get_app_id(service_name), timeout=TIMEOUT_SECONDS)\n except Exception:\n log.exception('Got exception when cleaning up {}'.format(service_name))\n raise\n\n finish = time.time()\n\n log.info(\n 'Uninstalled {} after pkg({}) + cleanup({}) = total({})'.format(\n service_name,\n shakedown.pretty_duration(cleanup_start - start),\n shakedown.pretty_duration(finish - cleanup_start),\n shakedown.pretty_duration(finish - start)))\n\n # Sanity check: Verify that all resources and the framework have been successfully cleaned up,\n # and throw an exception if anything is left over (uninstall bug?)\n _verify_completed_uninstall(service_name)\n\n # Finally, remove the service from the installed list (used by sdk_diag)\n global _installed_service_names\n try:\n _installed_service_names.remove(service_name)\n except KeyError:\n pass # Expected when tests preemptively uninstall at start of test\n", "sub_path": "testing/sdk_install.py", "file_name": "sdk_install.py", "file_ext": "py", "file_size_in_byte": 11282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "dcos.cosmos.packagemanager.PackageManager", "line_number": 52, "usage_type": "call"}, {"api_name": "dcos.cosmos.packagemanager", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dcos.cosmos", "line_number": 52, "usage_type": "name"}, {"api_name": "dcos.cosmos.cosmos.get_cosmos_url", "line_number": 52, "usage_type": "call"}, {"api_name": "dcos.cosmos.cosmos", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sdk_marathon.app_exists", "line_number": 64, "usage_type": "call"}, {"api_name": "dcos.cosmos.subcommand.install", "line_number": 72, "usage_type": "call"}, {"api_name": "dcos.cosmos.subcommand", "line_number": 72, "usage_type": "attribute"}, {"api_name": "dcos.cosmos", "line_number": 72, "usage_type": "name"}, {"api_name": "shakedown.wait_for_service_tasks_running", "line_number": 76, "usage_type": "call"}, {"api_name": "shakedown.deployment_wait", "line_number": 81, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 42, "usage_type": "call"}, {"api_name": "dcos.cosmos.errors", "line_number": 43, "usage_type": "attribute"}, {"api_name": "dcos.cosmos", "line_number": 43, "usage_type": "name"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "sdk_marathon.app_exists", "line_number": 96, "usage_type": "call"}, {"api_name": "sdk_utils.is_strict_mode", "line_number": 99, "usage_type": "call"}, {"api_name": "sdk_utils.merge_dictionaries", "line_number": 102, "usage_type": "call"}, {"api_name": 
"sdk_plan.wait_for_completed_deployment", "line_number": 130, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 133, "usage_type": "call"}, {"api_name": "sdk_cmd.run_cli", "line_number": 143, "usage_type": "call"}, {"api_name": "sdk_utils.get_role", "line_number": 146, "usage_type": "call"}, {"api_name": "sdk_utils.get_zk_path", "line_number": 148, "usage_type": "call"}, {"api_name": "sdk_cmd.master_ssh", "line_number": 151, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 139, "usage_type": "call"}, {"api_name": "shakedown.uninstall_package_and_wait", "line_number": 158, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 154, "usage_type": "call"}, {"api_name": "sdk_cmd.cluster_request", "line_number": 162, "usage_type": "call"}, {"api_name": "sdk_utils.get_role", "line_number": 167, "usage_type": "call"}, {"api_name": "time.time", "line_number": 220, "usage_type": "call"}, {"api_name": "time.time", "line_number": 230, "usage_type": "call"}, {"api_name": "sdk_utils.dcos_version_less_than", "line_number": 233, "usage_type": "call"}, {"api_name": "sdk_marathon.retried_wait_for_deployment_and_app_removal", "line_number": 240, "usage_type": "call"}, {"api_name": "sdk_marathon.get_app_id", "line_number": 241, "usage_type": "call"}, {"api_name": "time.time", "line_number": 246, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 251, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 252, "usage_type": "call"}, {"api_name": "shakedown.pretty_duration", "line_number": 253, "usage_type": "call"}]}
+{"seq_id": "56311996", "text": "# -*- coding: utf-8 -*-\n\nimport re\n\nimport pytest\nfrom django.core.urlresolvers import reverse\nfrom django_dynamic_fixture import G\nfrom pyquery import PyQuery as pq\n\nfrom readthedocs.builds.constants import LATEST\nfrom readthedocs.builds.models import Version\nfrom readthedocs.projects.models import HTMLFile, Project\nfrom readthedocs.search.tests.utils import (\n get_search_query_from_project_file,\n DATA_TYPES_VALUES,\n)\n\n\n@pytest.mark.django_db\n@pytest.mark.search\nclass TestProjectSearch:\n url = reverse('search')\n\n def _get_search_result(self, url, client, search_params):\n resp = client.get(url, search_params)\n assert resp.status_code == 200\n\n results = resp.context['results']\n facets = resp.context['facets']\n\n return results, facets\n\n def test_search_by_project_name(self, client, project, all_projects):\n results, _ = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': project.name },\n )\n\n assert len(results) == 1\n assert project.name.encode('utf-8') in results[0].name.encode('utf-8')\n for proj in all_projects[1:]:\n assert proj.name.encode('utf-8') not in results[0].name.encode('utf-8')\n\n def test_search_project_have_correct_language_facets(self, client, project):\n \"\"\"Test that searching project should have correct language facets in the results\"\"\"\n # Create a project in bn and add it as a translation\n G(Project, language='bn', name=project.name)\n\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params={ 'q': project.name },\n )\n\n lang_facets = facets['language']\n lang_facets_str = [facet[0] for facet in lang_facets]\n # There should be 2 languages\n assert len(lang_facets) == 2\n assert sorted(lang_facets_str) == sorted(['en', 'bn'])\n for facet in lang_facets:\n assert facet[2] == False # because none of the facets are applied\n\n def test_search_project_filter_language(self, client, project):\n \"\"\"Test that searching project filtered according to language.\"\"\"\n # Create a project in bn and add it as a translation\n translate = G(Project, language='bn', name=project.name)\n search_params = { 'q': project.name, 'language': 'bn' }\n\n results, facets = self._get_search_result(\n url=self.url,\n client=client,\n search_params=search_params,\n )\n\n # There should be only 1 result\n assert len(results) == 1\n\n lang_facets = facets['language']\n lang_facets_str = [facet[0] for facet in lang_facets]\n\n # There should be 2 languages because both `en` and `bn` should show there\n assert len(lang_facets) == 2\n assert sorted(lang_facets_str) == sorted(['en', 'bn'])\n\n\n@pytest.mark.django_db\n@pytest.mark.search\nclass TestPageSearch(object):\n url = reverse('search')\n\n def _get_search_result(self, url, client, search_params):\n resp = client.get(url, search_params)\n assert resp.status_code == 200\n\n results = resp.context['results']\n facets = resp.context['facets']\n\n return results, facets\n\n def _get_highlight(self, result, data_type):\n # if query is from page title,\n # highlighted title is present in 'result.meta.highlight.title'\n if data_type == 'title':\n highlight = result.meta.highlight.title\n\n # if result is not from page title,\n # then results and highlighted results are present inside 'inner_hits'\n else:\n inner_hits = result.meta.inner_hits\n assert len(inner_hits) >= 1\n\n # checking first inner_hit\n inner_hit_0 = inner_hits[0]\n expected_type = data_type.split('.')[0] # can be either 'sections' or 'domains'\n 
assert inner_hit_0['type'] == expected_type\n            highlight = inner_hit_0['highlight'][data_type]\n\n        return highlight\n\n    def _get_highlighted_words(self, string):\n        highlighted_words = re.findall(\n            '<em>(.*?)</em>',\n            string\n        )\n        return highlighted_words\n\n    @pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)\n    @pytest.mark.parametrize('page_num', [0, 1])\n    def test_file_search(self, client, project, data_type, page_num):\n        query = get_search_query_from_project_file(\n            project_slug=project.slug,\n            page_num=page_num,\n            data_type=data_type\n        )\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params={ 'q': query, 'type': 'file' }\n        )\n        assert len(results) >= 1\n\n        # checking first result\n        result_0 = results[0]\n        highlight = self._get_highlight(result_0, data_type)\n        assert len(highlight) == 1\n\n        highlighted_words = self._get_highlighted_words(highlight[0])\n        assert len(highlighted_words) >= 1\n        for word in highlighted_words:\n            # Make it lower because our search is case insensitive\n            assert word.lower() in query.lower()\n\n    def test_file_search_have_correct_role_name_facets(self, client):\n        \"\"\"Test that searching files should return all role_names.\"\"\"\n\n        # searching for 'celery' to test that\n        # correct role_names are displayed\n        results, facets = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params={ 'q': 'celery', 'type': 'file' }\n        )\n        assert len(results) >= 1\n        role_name_facets = facets['role_name']\n        role_name_facets_str = [facet[0] for facet in role_name_facets]\n        expected_role_names = ['py:class', 'py:function', 'py:method']\n        assert sorted(expected_role_names) == sorted(role_name_facets_str)\n        for facet in role_name_facets:\n            assert facet[2] == False # because none of the facets are applied\n\n    def test_file_search_filter_role_name(self, client):\n        \"\"\"Test that file search results are filtered according to role_names.\"\"\"\n\n        search_params = { 'q': 'celery', 'type': 'file' }\n        # searching without the filter\n        results, facets = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params\n        )\n        assert len(results) >= 2 # there are > 1 results without the filter\n        role_name_facets = facets['role_name']\n        for facet in role_name_facets:\n            assert facet[2] == False # because none of the facets are applied\n\n        confval_facet = 'py:class'\n        # checking if 'py:class' facet is present in results\n        assert confval_facet in [facet[0] for facet in role_name_facets]\n\n        # filtering with role_name=py:class\n        search_params['role_name'] = confval_facet\n        new_results, new_facets = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params\n        )\n        new_role_names_facets = new_facets['role_name']\n        # there is only one result with role_name='py:class'\n        # in `signals` page\n        assert len(new_results) == 1\n        first_result = new_results[0] # first result\n        inner_hits = first_result.meta.inner_hits # inner_hits of first results\n        assert len(inner_hits) >= 1\n        inner_hit_0 = inner_hits[0] # first inner_hit\n        assert inner_hit_0.type == 'domains'\n        assert inner_hit_0.source.role_name == confval_facet\n\n        for facet in new_role_names_facets:\n            if facet[0] == confval_facet:\n                assert facet[2] == True # because the 'py:class' filter is active\n            else:\n                assert facet[2] == False\n\n    @pytest.mark.parametrize('data_type', DATA_TYPES_VALUES)\n    @pytest.mark.parametrize('case', ['upper', 'lower', 'title'])\n    def test_file_search_case_insensitive(self, client, project, case, data_type):\n        \"\"\"\n        Check File search is case 
insensitive.\n\n        It tests with uppercase, lowercase and camelcase.\n        \"\"\"\n        query_text = get_search_query_from_project_file(\n            project_slug=project.slug,\n            data_type=data_type\n        )\n        cased_query = getattr(query_text, case)\n        query = cased_query()\n\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params={ 'q': query, 'type': 'file' }\n        )\n        assert len(results) >= 1\n\n        first_result = results[0]\n        highlight = self._get_highlight(first_result, data_type)\n        assert len(highlight) == 1\n        highlighted_words = self._get_highlighted_words(highlight[0])\n        assert len(highlighted_words) >= 1\n        for word in highlighted_words:\n            assert word.lower() in query.lower()\n\n    def test_file_search_exact_match(self, client, project):\n        \"\"\"\n        Check that a quoted query matches the exact phrase.\n\n        Making a query with quoted text like ``\"foo bar\"`` should match exactly\n        ``foo bar`` phrase.\n        \"\"\"\n\n        # `Sphinx` word is present both in `kuma` and `docs` files\n        # But the phrase `Sphinx uses` is present only in `kuma` docs.\n        # So search with this phrase to check\n        query = r'\"Sphinx uses\"'\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params={ 'q': query, 'type': 'file' })\n\n        # there must be only 1 result\n        # because the phrase is present in\n        # only one project\n        assert len(results) == 1\n        assert results[0].project == 'kuma'\n        assert results[0].path == 'testdocumentation'\n\n        inner_hits = results[0].meta.inner_hits\n        assert len(inner_hits) == 1\n        assert inner_hits[0].type == 'sections'\n        highlight = self._get_highlight(results[0], 'sections.content')\n        assert len(highlight) == 1\n        highlighted_words = self._get_highlighted_words(highlight[0])\n        assert len(highlighted_words) >= 1\n        for word in highlighted_words:\n            assert word.lower() in query.lower()\n\n    def test_file_search_have_correct_project_facets(self, client, all_projects):\n        \"\"\"Test that file search has correct project facets in results.\"\"\"\n\n        # `environment` word is present both in `kuma` and `docs` files\n        # so search with this phrase\n        query = 'environment'\n        results, facets = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params={ 'q': query, 'type': 'file' },\n        )\n        # There should be 2 search results\n        assert len(results) == 2\n        project_facets = facets['project']\n        project_facets_str = [facet[0] for facet in project_facets]\n        assert len(project_facets_str) == 2\n\n        # kuma and docs should be there\n        assert sorted(project_facets_str) == sorted(['kuma', 'docs'])\n\n    def test_file_search_filter_by_project(self, client):\n        \"\"\"Test that search results are filtered according to project.\"\"\"\n\n        # `environment` word is present both in `kuma` and `docs` files\n        # so search with this phrase but filter through `kuma` project\n        search_params = {\n            'q': 'environment',\n            'type': 'file',\n            'project': 'kuma'\n        }\n        results, facets = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params,\n        )\n        project_facets = facets['project']\n        resulted_project_facets = [ facet[0] for facet in project_facets ]\n\n        # There should be 1 search result as we have filtered\n        assert len(results) == 1\n        # only kuma should be there\n        assert 'kuma' == results[0].project\n\n        # But there should be 2 projects in the project facets\n        # as the query is present in both projects\n        assert sorted(resulted_project_facets) == sorted(['kuma', 'docs'])\n\n    @pytest.mark.xfail(reason='Versions are not showing correctly! 
Fixme while rewrite!')\n    def test_file_search_show_versions(self, client, all_projects, es_index, settings):\n        # override the settings to index all versions\n        settings.INDEX_ONLY_LATEST = False\n\n        project = all_projects[0]\n        # Create some versions of the project\n        versions = [G(Version, project=project) for _ in range(3)]\n        query = get_search_query_from_project_file(project_slug=project.slug)\n        results, facets = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params={ 'q': query, 'type': 'file' },\n        )\n\n        # Results can be from other projects also\n        assert len(results) >= 1\n\n        version_facets = facets['version']\n        version_facets_str = [facet[0] for facet in version_facets]\n        # There should be 4 versions in total:\n        # one is latest, and the other 3 are the ones we created above\n        assert len(version_facets) == 4\n\n        project_versions = [v.slug for v in versions] + [LATEST]\n        assert sorted(project_versions) == sorted(version_facets_str)\n\n    def test_file_search_subprojects(self, client, all_projects, es_index):\n        \"\"\"\n        TODO: File search should return results from subprojects also.\n\n        This is currently disabled because the UX around it is weird.\n        You filter by a project, and get results for multiple.\n        \"\"\"\n        project = all_projects[0]\n        subproject = all_projects[1]\n        # Add another project as subproject of the project\n        project.add_subproject(subproject)\n\n        # Now search with subproject content but explicitly filter by the parent project\n        query = get_search_query_from_project_file(project_slug=subproject.slug)\n        search_params = {\n            'q': query,\n            'type': 'file',\n            'project': project.slug,\n        }\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params,\n        )\n        assert len(results) == 0\n\n    def test_search_page_size(self, client, all_projects):\n        query = 'are'\n        search_params = {'q': query, 'type': 'file'}\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params,\n        )\n        # There should be 3 search results\n        assert len(results) == 3\n\n        search_params['page_size'] = 2\n\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params,\n        )\n\n        assert len(results) == 2\n\n        search_params['page_size'] = 'not_number'\n\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params,\n        )\n\n        assert len(results) == 3\n\n        search_params['page_size'] = ''\n\n        results, _ = self._get_search_result(\n            url=self.url,\n            client=client,\n            search_params=search_params,\n        )\n\n        assert len(results) == 3\n\n", "sub_path": "readthedocs/search/tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 15255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.core.urlresolvers.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "django_dynamic_fixture.G", "line_number": 48, "usage_type": "call"}, {"api_name": "readthedocs.projects.models.Project", "line_number": 48, "usage_type": "argument"}, {"api_name": "django_dynamic_fixture.G", "line_number": 67, "usage_type": "call"}, {"api_name": "readthedocs.projects.models.Project", "line_number": 67, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 90, "usage_type": "call"}, {"api_name": 
"re.findall", "line_number": 122, "usage_type": "call"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 131, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 128, "usage_type": "call"}, {"api_name": "readthedocs.search.tests.utils.DATA_TYPES_VALUES", "line_number": 128, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 129, "usage_type": "attribute"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 223, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 215, "usage_type": "call"}, {"api_name": "readthedocs.search.tests.utils.DATA_TYPES_VALUES", "line_number": 215, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 215, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 216, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 216, "usage_type": "attribute"}, {"api_name": "django_dynamic_fixture.G", "line_number": 333, "usage_type": "call"}, {"api_name": "readthedocs.builds.models.Version", "line_number": 333, "usage_type": "argument"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 334, "usage_type": "call"}, {"api_name": "readthedocs.builds.constants.LATEST", "line_number": 350, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 326, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 326, "usage_type": "attribute"}, {"api_name": "readthedocs.search.tests.utils.get_search_query_from_project_file", "line_number": 366, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 88, "usage_type": "attribute"}]}
+{"seq_id": "273947389", "text": "# -*- coding: utf-8 -*-\n###################################################################################\n#\n# Cybrosys Technologies Pvt. Ltd.\n#\n# Copyright (C) 2019-TODAY Cybrosys Technologies().\n# This program is free software: you can modify\n# it under the terms of the GNU Affero General Public License (AGPL) as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n###################################################################################\n\nimport json\n\nfrom odoo.addons.http_routing.models.ir_http import slug, unslug\nfrom odoo.addons.website.controllers.main import QueryURL\nfrom odoo.addons.website_blog.controllers.main import WebsiteBlog\n\nfrom odoo import http, fields, SUPERUSER_ID\nfrom odoo.http import request\n\n\nclass BlogInherit(WebsiteBlog):\n \"\"\"Override class WebsiteBlog\"\"\"\n @http.route(['/blog',\n '''/blog/''',\n '''/blog//page/''',\n '''/blog//tag/''',\n '''/blog//tag//page/''',\n '''/blog/search_content''',\n ], type='http', auth=\"public\", website=True, csrf=False)\n def blog(self, blog=None, tag=None, page=1, **opt):\n \"\"\"function related to blog display\"\"\"\n date_begin, date_end, state = opt.get('date_begin'), opt.get('date_end'), opt.get('state')\n published_count, unpublished_count = 0, 0\n\n domain = request.website.website_domain()\n blog_post = request.env['blog.post']\n blogs = request.env['blog.blog'].search(domain, order=\"create_date asc\", limit=2)\n # retrocompatibility to accept tag as slug\n active_tag_ids = tag and [int(unslug(t)[1]) for t in tag.split(',')] if tag else []\n if active_tag_ids:\n fixed_tag_slug = \",\".join(slug(t) for t in request.env['blog.tag'].browse(active_tag_ids))\n if fixed_tag_slug != tag:\n return request.redirect(\n request.httprequest.full_path.replace(\"/tag/%s/\" % tag, \"/tag/%s/\" % fixed_tag_slug, 1), 301)\n domain += [('tag_ids', 'in', active_tag_ids)]\n if blog:\n domain += [('blog_id', '=', blog.id)]\n if date_begin and date_end:\n domain += [(\"post_date\", \">=\", date_begin), (\"post_date\", \"<=\", date_end)]\n\n if request.env.user.has_group('website.group_website_designer'):\n count_domain = domain + [(\"website_published\", \"=\", True), (\"post_date\", \"<=\", fields.Datetime.now())]\n published_count = blog_post.search_count(count_domain)\n unpublished_count = blog_post.search_count(domain) - published_count\n\n if state == \"published\":\n domain += [(\"website_published\", \"=\", True), (\"post_date\", \"<=\", fields.Datetime.now())]\n elif state == \"unpublished\":\n domain += ['|', (\"website_published\", \"=\", False), (\"post_date\", \">\", fields.Datetime.now())]\n else:\n domain += [(\"post_date\", \"<=\", fields.Datetime.now())]\n\n blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)\n\n search_string = opt.get('search', None)\n\n blog_posts = blog_post.search([('name', 'ilike', search_string)],\n offset=(page - 1) * self._blog_post_per_page,\n limit=self._blog_post_per_page) if search_string \\\n else blog_post.search(domain,\n 
order=\"post_date desc\")\n\n pager = request.website.pager(\n url=request.httprequest.path.partition('/page/')[0],\n total=len(blog_posts),\n page=page,\n step=self._blog_post_per_page,\n url_args=opt,\n )\n pager_begin = (page - 1) * self._blog_post_per_page\n pager_end = page * self._blog_post_per_page\n blog_posts = blog_posts[pager_begin:pager_end]\n\n all_tags = request.env['blog.tag'].search([])\n use_cover = request.website.viewref('website_blog.opt_blog_cover_post').active\n fullwidth_cover = request.website.viewref('website_blog.opt_blog_cover_post_fullwidth_design').active\n offset = (page - 1) * self._blog_post_per_page\n first_post = blog_posts\n if not blog:\n first_post = blog_posts.search(domain + [('website_published', '=', True)], order=\"post_date desc, id asc\",\n limit=1)\n if use_cover and not fullwidth_cover:\n offset += 1\n\n # function to create the string list of tag ids, and toggle a given one.\n # used in the 'Tags Cloud' template.\n\n def tags_list(tag_ids, current_tag):\n tag_ids = list(tag_ids) # required to avoid using the same list\n if current_tag in tag_ids:\n tag_ids.remove(current_tag)\n else:\n tag_ids.append(current_tag)\n tag_ids = request.env['blog.tag'].browse(tag_ids).exists()\n return ','.join(slug(tags) for tags in tag_ids)\n\n tag_category = sorted(all_tags.mapped('category_id'), key=lambda category: category.name.upper())\n other_tags = sorted(all_tags.filtered(lambda x: not x.category_id), key=lambda tags: tags.name.upper())\n values = {\n 'blog': blog,\n 'blogs': blogs,\n 'first_post': first_post.with_prefetch(blog_posts.ids) if not search_string else None,\n 'other_tags': other_tags,\n 'state_info': {\"state\": state, \"published\": published_count, \"unpublished\": unpublished_count},\n 'active_tag_ids': active_tag_ids,\n 'tags_list': tags_list,\n 'posts': blog_posts,\n 'blog_posts_cover_properties': [json.loads(b.cover_properties) for b in blog_posts],\n 'pager': pager,\n 'nav_list': self.nav_list(blog),\n 'blog_url': blog_url,\n 'date': date_begin,\n 'tag_category': tag_category,\n }\n response = request.render(\"website_blog.blog_post_short\", values)\n return response\n\n @http.route('/blog/search', csrf=False, type=\"http\", methods=['POST', 'GET'], auth=\"public\", website=True)\n def search_contents(self, **kw):\n \"\"\"get search result for auto suggestions\"\"\"\n strings = '%' + kw.get('name') + '%'\n try:\n domain = [('website_published', '=', True)]\n blog = request.env['blog.post'].with_user(SUPERUSER_ID).search(domain)\n sql = \"\"\"select id as res_id, name as name, name as value from blog_post where name ILIKE '{}'\"\"\"\n extra_query = ''\n limit = \" limit 15\"\n qry = sql + extra_query + limit\n request.cr.execute(qry.format(strings, tuple(blog and blog.ids)))\n name = request.cr.dictfetchall()\n except:\n name = {'name': 'None', 'value': 'None'}\n return json.dumps(name)\n", "sub_path": "website_search_blog/controllers/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "odoo.addons.website_blog.controllers.main.WebsiteBlog", "line_number": 32, "usage_type": "name"}, {"api_name": "odoo.http.request.website.website_domain", "line_number": 46, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 46, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 46, "usage_type": "name"}, {"api_name": "odoo.http.request.env", 
"line_number": 47, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 47, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 48, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 48, "usage_type": "name"}, {"api_name": "odoo.addons.http_routing.models.ir_http.unslug", "line_number": 50, "usage_type": "call"}, {"api_name": "odoo.addons.http_routing.models.ir_http.slug", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 52, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 52, "usage_type": "name"}, {"api_name": "odoo.http.request.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 54, "usage_type": "name"}, {"api_name": "odoo.http.request.httprequest.full_path.replace", "line_number": 55, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 55, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 55, "usage_type": "name"}, {"api_name": "odoo.http.request.env.user.has_group", "line_number": 62, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 62, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 62, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 63, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 68, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 70, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 70, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 72, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 72, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 72, "usage_type": "name"}, {"api_name": "odoo.addons.website.controllers.main.QueryURL", "line_number": 74, "usage_type": "call"}, {"api_name": "odoo.http.request.website.pager", "line_number": 84, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 84, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 84, "usage_type": "name"}, {"api_name": "odoo.http.request.httprequest.path.partition", "line_number": 85, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 85, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 85, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 95, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 95, "usage_type": "name"}, {"api_name": "odoo.http.request.website.viewref", "line_number": 96, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 96, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 96, "usage_type": "name"}, {"api_name": "odoo.http.request.website.viewref", "line_number": 97, "usage_type": "call"}, {"api_name": "odoo.http.request.website", "line_number": 97, "usage_type": "attribute"}, {"api_name": 
"odoo.http.request", "line_number": 97, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 115, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 115, "usage_type": "name"}, {"api_name": "odoo.addons.http_routing.models.ir_http.slug", "line_number": 116, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 129, "usage_type": "call"}, {"api_name": "odoo.http.request.render", "line_number": 136, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 136, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 34, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 34, "usage_type": "name"}, {"api_name": "odoo.SUPERUSER_ID", "line_number": 145, "usage_type": "argument"}, {"api_name": "odoo.http.request.env", "line_number": 145, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 145, "usage_type": "name"}, {"api_name": "odoo.http.request.cr.execute", "line_number": 150, "usage_type": "call"}, {"api_name": "odoo.http.request.cr", "line_number": 150, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 150, "usage_type": "name"}, {"api_name": "odoo.http.request.cr.dictfetchall", "line_number": 151, "usage_type": "call"}, {"api_name": "odoo.http.request.cr", "line_number": 151, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 151, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "odoo.http.route", "line_number": 139, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 139, "usage_type": "name"}]}
+{"seq_id": "416554006", "text": "from pdcresource import *\nfrom pdcglobal import *\nfrom magic import Spell\n\nclass ColdSpell(Spell):\n def __init__(self):\n Spell.__init__(self)\n self.color = BLUE\n self.type = ST_GENERIC\n \nclass FrostRay(ColdSpell):\n def __init__(self):\n ColdSpell.__init__(self)\n self.phys_cost = 15\n self.mind_cost = 30\n self.name = 'Frost Ray'\n self.infotext = 'Damage Foes with cold'\n\n def cast(self, caster):\n self.caster = caster\n self.game.wait_for_target = self\n self.game.player_actions.cursor()\n def target_choosen(self, pos):\n target = self.get_ray_target(self.caster.pos(), pos)\n if target == None:\n self.game.shout('Your spell fizzles')\n else:\n amount = d(self.caster.mind / 20) + self.caster.mind / 20\n self.game.do_damage(target, amount, D_COLD)\n self.game.shout('%s freezed %s' % (self.caster.name, target.name))\n self.game.wait_for_target = None\n self.game.state = S_RUN", "sub_path": "PDC/PDC_5/PDC/src/magic/cold_spells.py", "file_name": "cold_spells.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "magic.Spell", "line_number": 5, "usage_type": "name"}, {"api_name": "magic.Spell.__init__", "line_number": 7, "usage_type": "call"}, {"api_name": "magic.Spell", "line_number": 7, "usage_type": "name"}]}
+{"seq_id": "506827358", "text": "import json\r\n\r\nfrom flask import Flask, render_template, redirect, url_for, request\r\nfrom flask_wtf import FlaskForm\r\nfrom requests.exceptions import ConnectionError\r\nfrom wtforms import IntegerField, SelectField, StringField\r\nfrom wtforms.validators import DataRequired\r\n\r\nimport urllib.request\r\nimport json\r\n\r\nclass ClientDataForm(FlaskForm):\r\n sulphates = StringField('Sulphates [0.33, 2]', validators=[DataRequired()])\r\n free_sulfur_dioxide = StringField('Free Sulfur Dioxide [1, 72]', validators=[DataRequired()])\r\n total_sulfur_dioxide = StringField('Total Sulfur Dioxide [6, 289]', validators=[DataRequired()])\r\n pH = StringField('pH [2.74, 4.01]', validators=[DataRequired()])\r\n\r\n\r\napp = Flask(__name__)\r\napp.config.update(\r\n CSRF_ENABLED=True,\r\n SECRET_KEY='you-will-never-guess',\r\n)\r\n\r\ndef get_prediction(description, company_profile, benefits):\r\n body = {\"sulphate\": [sulphates],\r\n \"free_sulfur_dioxide\":[free_sulfur_dioxide],\r\n \"total_sulfur_dioxide\": [total_sulfur_dioxide],\r\n \"pH\": [pH]}\r\n\r\n myurl = \"http://0.0.0.0:8180/predict\"\r\n req = urllib.request.Request(myurl)\r\n req.add_header('Content-Type', 'application/json; charset=utf-8')\r\n jsondata = json.dumps(body)\r\n jsondataasbytes = jsondata.encode('utf-8') # needs to be bytes\r\n req.add_header('Content-Length', len(jsondataasbytes))\r\n #print (jsondataasbytes)\r\n response = urllib.request.urlopen(req, jsondataasbytes)\r\n return json.loads(response.read())['predictions']\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/predicted/')\r\ndef predicted(response):\r\n response = json.loads(response)\r\n print(response)\r\n return render_template('predicted.html', response=response)\r\n\r\n\r\n@app.route('/predict_form', methods=['GET', 'POST'])\r\ndef predict_form():\r\n form = ClientDataForm()\r\n data = dict()\r\n if request.method == 'POST':\r\n data['sulphate'] = request.form.get('sulphate')\r\n data['free_sulfur_dioxide'] = request.form.get('free_sulfur_dioxide')\r\n data['total_sulfur_dioxide'] = request.form.get('total_sulfur_dioxide')\r\n data['pH'] = request.form.get('pH')\r\n\r\n\r\n try:\r\n response = str(get_prediction(data['sulphate'],\r\n data['free_sulfur_dioxide'],\r\n data['total_sulfur_dioxide'],\r\n data['pH']))\r\n print(response)\r\n except ConnectionError:\r\n response = json.dumps({\"error\": \"ConnectionError\"})\r\n return redirect(url_for('predicted', response=response))\r\n return render_template('form.html', form=form)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=8181, debug=True)", "sub_path": "app/front/run_front_server.py", "file_name": "run_front_server.py", "file_ext": "py", "file_size_in_byte": 2818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask_wtf.FlaskForm", "line_number": 12, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 13, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 13, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 14, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 14, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 15, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 15, "usage_type": "call"}, {"api_name": "wtforms.StringField", 
"line_number": 16, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 32, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 32, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "requests.exceptions.ConnectionError", "line_number": 70, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "176466838", "text": "from django import forms\nfrom devices.models import Manufacturer\n\n\nclass ManufacturerForm(forms.ModelForm):\n name = forms.CharField(widget=forms.TextInput(\n attrs={\n 'id': 'name',\n 'class': 'form-control',\n 'placeholder': 'HP',\n\n }\n ))\n image = forms.FileField(required=False,\n label='Company Logo',\n widget=forms.FileInput(\n attrs={\n 'accept': \"image/*\",\n }))\n\n class Meta:\n model = Manufacturer\n fields = ['name', 'image']\n", "sub_path": "devices/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms.FileField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.FileInput", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "devices.models.Manufacturer", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "558862826", "text": "from __future__ import print_function\n\nimport os\nimport string\nimport tempfile\n\nimport pygraphviz\nfrom networkx.drawing import nx_agraph\n\nJAR_DIR = './baselines/constraint'\n\n\ndef regex2dfa(reg_ex, letter='q'):\n transfer_file = tempfile.NamedTemporaryFile(mode='w+')\n command = 'java -jar {}/regex2dfa.jar \"{}\" {}'.format(\n JAR_DIR, reg_ex, transfer_file.name)\n os.system(command)\n\n with open(transfer_file.name) as fname:\n dot = fname.read()\n print(dot, file=open('{}.dot'.format(transfer_file.name), 'w'))\n return nx_agraph.from_agraph(pygraphviz.AGraph(dot))\n\n\nclass DFA:\n def __init__(self, reg_ex):\n self.dfa = regex2dfa(reg_ex)\n self.current_state = 'q0'\n self.num_states = len(self.dfa.nodes())\n self.state_ids = dict(zip(self.dfa.nodes(), range(self.num_states)))\n\n def step(self, action):\n is_accept, self.current_state = self._traverse_dfa(\n action, self.current_state)\n return is_accept\n\n def reset(self):\n self.current_state = 'q0'\n\n def _traverse_dfa(self, char, start):\n \"\"\"\n dfa_dot: dfa in graphviz dot file\n first return value shows if next state is an accept state\n second return value is the next state\n \"\"\"\n # convert [1-2][0-9] | 3[0-5] to letter in the upper case alph.\n if char != 's' and int(char) >= 10 and int(char) <= 35:\n i = int(char) - 10\n char = '\"{}\"'.format(string.ascii_uppercase[i])\n\n dfa = self.dfa\n accept_states = [\n n for n in dfa.nodes()\n if dfa.nodes.data('shape')[n] == 'doublecircle'\n ]\n edges = dfa.edges.data('label')\n transitions = list(filter(lambda x: x[0] == start, edges))\n for transition in transitions:\n if transition[2] == str(char):\n next_state = transition[1]\n if next_state in accept_states:\n return True, next_state\n else:\n return False, next_state\n\n return False, 'q0'\n\n def states(self):\n return [str(n) for n in self.dfa.nodes()]\n\n def accepting_states(self):\n return [\n str(n) for n in self.dfa.nodes()\n if self.dfa.nodes.data('shape')[n] == 'doublecircle'\n ]\n\n def state_id(self):\n return self.state_ids[self.current_state]\n", "sub_path": "mujoco_experiments/baselines/constraint/dfa.py", "file_name": "dfa.py", "file_ext": "py", "file_size_in_byte": 2388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tempfile.NamedTemporaryFile", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "networkx.drawing.nx_agraph.from_agraph", "line_number": 22, "usage_type": "call"}, {"api_name": "networkx.drawing.nx_agraph", "line_number": 22, "usage_type": "name"}, {"api_name": "pygraphviz.AGraph", "line_number": 22, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 49, "usage_type": "attribute"}]}
+{"seq_id": "555666360", "text": "from django.conf.urls import url\nfrom . import views\napp_name = 'credit'\n\nurlpatterns =[\n # url(r'^$', views.credit, name='credit'),\n # url(r'^reyting/$', views.creditreyting, {'template_name : reyting.html'})\n url(r'^$', views.Listcredit, name='Listcreditrateinf'),\n url(r'^(?P[-\\w]+)/$', views.Listcredit, name='ListCreditViews'),\n url(r'^(?P\\d+)/(?P[-\\w]+)/$', views.DetailCredit, name='DetailCreditViews')\n\n]", "sub_path": "credit/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "277314246", "text": "import numpy as np\nimport math as math\nimport matplotlib.pyplot as plt\nimport h5py as h5py\nfrom sphere_diffusion_linear_fixedconc import sdl_analytical\n\n\ndef readh5file(h5file, nx, nr):\n length = 5.0\n radius = 0.028\n cc = []\n tt = []\n t0 = 0.0\n\n f0 = h5py.File(h5file, 'r')\n\n j = 0 \n for i in f0.keys():\n if j == 0:\n cc = f0[i][:, 0]\n j = j + 1\n else:\n cc = np.column_stack((cc, f0[i][:, 0]))\n t = i.split()\n tt.append(t0 + float(t[2]))\n f0.close()\n\n res = {}\n res['t'] = np.asarray(tt)\n res['c'] = np.asarray(cc)\n res['x'] = np.linspace(0, length, nx)\n r1 = np.linspace(radius, 0, nr+1)\n rr = np.zeros(nr)\n for i in range(nr):\n rr[i] = 0.5 * (r1[i] + r1[i+1]) \n res['r'] = rr * 10.0\n\n return res\n\n\nr1 = readh5file('sde_5x10x20.h5', 5, 10)\nr2 = readh5file('sde_5x10x40.h5', 5, 10)\nr3 = readh5file('sde_5x10x100.h5', 5, 10)\ncin = 1.0 #1.9941e-07;\nnr = 100\n\nlx = 0.08\nly = 0.90\n\nit1 = 0\nit2 = 4 \nit3 = 9 \nit4 = 19\nitt = [r1['t'][it1], r1['t'][it2], r1['t'][it3], r1['t'][it4]] \n\nra = np.linspace(0.0028, 0.28, 101)\nc4 = sdl_analytical(ra/10.0, itt, 0.028, 1.0e-6) \n\nax1 = plt.subplot(2, 2, 1)\ni = it1\nnx = 5\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\n\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 0], 'm-')\nplt.ylabel('c/c$_0$')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.text(lx, ly, '(a) t = %d s' % (r1['t'][i]), transform=ax1.transAxes)\nplt.setp(ax1.get_xticklabels(), visible=False)\n\nax2 = plt.subplot(2, 2, 2)\ni = it2\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 1], 'm-')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.setp(ax2.get_yticklabels(), visible=False)\nplt.text(lx, ly, '(b) t = %d s' % (r1['t'][i]), transform=ax2.transAxes)\nplt.setp(ax2.get_xticklabels(), visible=False)\n\nax3 = plt.subplot(2, 2, 3)\ni = it3\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 2], 'm-')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.text(lx, ly, '(c) t = %d s' % (r1['t'][i]), transform=ax3.transAxes)\nplt.xlabel('r (mm)')\nplt.ylabel('c/c$_0$')\n\nax4 = plt.subplot(2, 2, 4)\ni = it4\nnr = 10\nc1 = np.asarray(r1['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc2 = np.asarray(r2['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nc3 = np.asarray(r3['c'][:, i].reshape(nr+1, nx))[1:nr+1, 0]\nplt.plot(r1['r'], c1/cin, 'bo', r2['r'], c2/cin, 'rx', r3['r'], c3/cin, 'g+', ra, c4[:, 3], 'm-')\nplt.xlim([0, 0.28])\nplt.ylim([0, 1.2])\nplt.text(lx, ly, '(d) t = %d s' % (r1['t'][i]), transform=ax4.transAxes)\nplt.xlabel('r (mm)')\nplt.setp(ax4.get_yticklabels(), visible=False)\nlgd = plt.legend(('nt = 20', 'nt = 50', 'nt = 100', 'Analytical'),loc=3)\nlgd.draw_frame(False)\n#txt = lgd.get_texts()\n#plt.setp(txt, fontsize='small') \n\nfig = plt.gcf()\nfig.subplots_adjust(left=0.08, right=0.95, wspace=0.05, hspace=0.08)\nfig.set_size_inches(8, 6)\nplt.savefig('prof.pdf')\nplt.show()\n", "sub_path": 
"tests/sde/equaldist-dt/prof.py", "file_name": "prof.py", "file_ext": "py", "file_size_in_byte": 3435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "h5py.File", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 56, "usage_type": "call"}, {"api_name": "sphere_diffusion_linear_fixedconc.sdl_analytical", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}]}
+{"seq_id": "170488911", "text": "import os\nimport sys\nsys.path.append('../')\n\nfrom dateutil.parser import parse\n\nfrom middleware.rabbitmq_queue import RabbitMQQueue\nfrom middleware.rabbitmq_queues import RabbitMQQueues\nfrom middleware.log import Logger\nfrom middleware.shenlong_status_sender import ShenlongStatusSender\n\nTWIT_ID = 0\nAUTHOR_ID = 1\nINBOUND = 2\nCREATED_AT = 3\nTEXT = 4\n\nNUM_COLUMS = 7\n\nIS_CUSTOMER = \"True\"\n\nRECEIVE_QUEUE_NAME = \"preprocesed_twits\"\nSEND_QUEUE_NAME = \"raw_twits\"\nHEALTH_QUEUE = \"pings\"\n\nclass FilterParser(object):\n def __init__(self, send_queues, receive_queue, shenlong_sender, logger):\n self.send_queues = send_queues\n self.receive_queue = receive_queue\n self.shenlong_sender = shenlong_sender\n self.logger = logger\n self.received_twits = {}\n\n def run(self):\n self.logger.log(\"Start consuming\")\n self.shenlong_sender.start()\n self.receive_queue.consume(self._callback, self._eoj_callback)\n self.logger.log(\"Sending EOM to queues\")\n self.send_queues.send_eom()\n self.logger.log(\"Finish\")\n self.shenlong_sender.stop()\n self.shenlong_sender.join()\n\n def _callback(self, ch, method, properties, decoded_body):\n self.logger.log_with_frequency(\"Received line %s\", decoded_body)\n\n body_values = decoded_body.rstrip().split(\",\")\n twit_id = body_values[TWIT_ID]\n\n if (len(body_values) != NUM_COLUMS) or (body_values[INBOUND] != IS_CUSTOMER) or (twit_id in self.received_twits):\n self.logger.log_with_frequency(\"Twit discarted\")\n ch.basic_ack(delivery_tag = method.delivery_tag)\n return\n\n day = str(parse(body_values[CREATED_AT]).date())\n\n self.logger.log_with_frequency(\"Sending parsed value\")\n\n self.send_queues.send(\"{},{},{},{}\".format(body_values[TWIT_ID], body_values[AUTHOR_ID], day, body_values[TEXT]), body_values[TWIT_ID])\n\n self.received_twits[twit_id] = True\n\n ch.basic_ack(delivery_tag = method.delivery_tag)\n\n def _eoj_callback(self, eoj_msg):\n self.logger.log(\"Received EOJ\")\n self.send_queues.send_eoj(eoj_msg)\n self.logger.log(\"Send EOJ\")\n\nif __name__ == '__main__':\n rabbitmq_host = os.environ['RABBITMQ_HOST']\n filter_parser_workers = int(os.environ['FILTER_PARSER_WORKERS'])\n analyzer_workers = int(os.environ['ANALYZER_WORKERS'])\n\n worker_id = os.environ['SERVICE_ID']\n\n log_file = os.environ['LOG_FILE']\n log_frequency = int(os.environ['LOG_FREQUENCY'])\n\n send_queues = RabbitMQQueues(RECEIVE_QUEUE_NAME, rabbitmq_host, analyzer_workers)\n receive_queue = RabbitMQQueue(\"{}{}\".format(SEND_QUEUE_NAME, worker_id), rabbitmq_host)\n health_queue = RabbitMQQueue(HEALTH_QUEUE, rabbitmq_host)\n\n shenlong_sender = ShenlongStatusSender(\"FILTER-PARSER\", worker_id, health_queue)\n logger = Logger(\"FILTER PARSER [{}]\".format(worker_id), log_file, log_frequency)\n\n worker = FilterParser(send_queues, receive_queue, shenlong_sender, logger)\n\n logger.log(\"Worker created, started running\")\n worker.run()\n logger.log(\"Worker finished, exiting\")\n", "sub_path": "src/filter_parser/filter_parser.py", "file_name": "filter_parser.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 55, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 71, "usage_type": "attribute"}, 
{"api_name": "os.environ", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 78, "usage_type": "attribute"}, {"api_name": "middleware.rabbitmq_queues.RabbitMQQueues", "line_number": 80, "usage_type": "call"}, {"api_name": "middleware.rabbitmq_queue.RabbitMQQueue", "line_number": 81, "usage_type": "call"}, {"api_name": "middleware.rabbitmq_queue.RabbitMQQueue", "line_number": 82, "usage_type": "call"}, {"api_name": "middleware.shenlong_status_sender.ShenlongStatusSender", "line_number": 84, "usage_type": "call"}, {"api_name": "middleware.log.Logger", "line_number": 85, "usage_type": "call"}]}
+{"seq_id": "525774534", "text": "import cv2\nimport numpy as np\nimport csv\nimport glob\nimport os\n\nsumclass=[0,0,0,0,0,0,0,0,0]\nsave_path_pola = \"pola kalung\"\n\n#Mengambil gambar tiap folder kelas\nfor class_image_path in glob.glob(\"D:\\PycharmProjects\\PCDSAPI\\kalung sapi\\*\"):\n print(class_image_path)\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 1'): neck_class = 1\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 2'): neck_class = 2\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 3'): neck_class = 3\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 4'): neck_class = 4\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 5'): neck_class = 5\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 6'): neck_class = 6\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 7'): neck_class = 7\n if (class_image_path.split(\"\\\\\")[-1] == 'Kelas 8'): neck_class = 8\n f = True\n class_folder = \"Kelas \"+str(neck_class)\n new_save_path = os.path.join(save_path_pola,class_folder)\n print(\"PATH ==\",new_save_path)\n for image_path in glob.glob(os.path.join(class_image_path, \"*.bmp\")):\n print(image_path)\n # if(neck_class!=7):\n # break\n x=str(neck_class)+\"-class-\"+str(sumclass[neck_class])\n name = x + \"-test.bmp\"\n print(name)\n print(type(name))\n im_gray = cv2.imread(image_path,0)\n thresh = 127\n im_binerr = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY)[1]\n im_gray = cv2.medianBlur(im_gray,5)\n im_biner = cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR)\n\n arr = []\n v = []\n if(neck_class==7):\n houghparam=35\n else:\n houghparam=55\n\n try:\n\n circles = cv2.HoughCircles(im_gray, cv2.HOUGH_GRADIENT, 1, 100, param1=290, param2=houghparam, minRadius=0, maxRadius=0)\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(im_biner, (i[0], i[1]), i[2], (0, 255, 255), 2)\n cv2.circle(im_biner, (i[0], i[1]), 2, (0, 0, 255), 112)\n\n flag = 1\n row, col, ch = im_biner.shape\n graykanvas = np.zeros((row, col, 1), np.uint8)\n for i in range(0, row):\n for j in range(0, col):\n b, g, r = im_biner[i, j]\n if (b == 255 & g == 0 & r == 0):\n graykanvas.itemset((i, j, 0), 255)\n if (flag == 1):\n x = i\n y = j\n flag = 100\n else:\n graykanvas.itemset((i, j, 0), 0)\n\n im_hasil = cv2.subtract(graykanvas, im_gray)\n\n hasil_crop = im_hasil[x:x + 112, y - 56:y + 56] # im awe [y,x]\n cv2.imshow(\"hasil crop\", hasil_crop)\n thresh = 130\n\n kernel = np.ones((5, 5), np.uint8)\n\n crop_biner = cv2.threshold(hasil_crop, thresh, 255, cv2.THRESH_BINARY)[1]\n\n\n\n cv2.imwrite(os.path.join(new_save_path,name),crop_biner)\n\n row, col= crop_biner.shape\n for r in range(0,row):\n a = 0\n for c in range(0,col):\n if crop_biner[r,c]==255:\n crop_biner[r,c]=1\n a+=crop_biner[r,c]\n v.append(a)\n # print(v)\n # print(r)\n print(len(v))\n print(\"tipe\",type(v))\n print(v)\n v=v/max(v)\n v=[int(round(l)) for l in v]\n\n arr.append(name)\n for d in v:\n arr.append(d)\n arr.append(neck_class)\n print(arr)\n\n\n csvfile = \"datavector.csv\"\n\n with open(csvfile, 'a+',newline='') as output:\n writer = csv.writer(output, lineterminator=',')\n for val in arr[:-1]:\n writer.writerow([val])\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerow([arr[-1]])\n\n sumclass[neck_class]=sumclass[neck_class]+1\n\n except Exception:\n pass\n\n if (sumclass[neck_class]==3):\n break\n", "sub_path": "hough.py", "file_name": "hough.py", "file_ext": "py", "file_size_in_byte": 4174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "57", "api": [{"api_name": "glob.glob", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.HoughCircles", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.HOUGH_GRADIENT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cv2.subtract", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 109, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 112, "usage_type": "call"}]}
+{"seq_id": "280440163", "text": "\"\"\"\nDefinition of urls for AvalFrameWeb.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\nfrom django.conf.urls import include\n\nimport app.forms\nfrom app.views import home\nfrom app.views import analise_dados\n\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = [\n # Examples:\n #url(r'^analise_dados/', include('app.urls.analise_dados.urls')),\n url(r'^$', home.home, name='home'),\n url(r'^competencias/', include('app.urls.competencias.urls')),\n url(r'^niveis_competencia_avaliacao/', include('app.urls.niveis_competencia_avaliacao.urls')),\n url(r'^aprendizagens/', include('app.urls.aprendizagens.urls')),\n url(r'^niveis_aprendizagem/', include('app.urls.niveis_aprendizagem.urls')),\n url(r'^jogos_digitais/', include('app.urls.jogos_digitais.urls')),\n url(r'^niveis_jogo/', include('app.urls.niveis_jogo.urls')),\n url(r'^aeej/', include('app.urls.aeej.urls')),\n url(r'^dispositivos_captura/', include('app.urls.dispositivos_captura.urls')),\n url(r'^jogadores/', include('app.urls.jogadores.urls')),\n url(r'^aprendizagens_aeej/', include('app.urls.aprendizagens_aeej.urls')),\n url(r'^competencias_aprendizagens/', include('app.urls.competencias_aprendizagens.urls')),\n url(r'^etapas_jogo/', include('app.urls.etapas_jogo.urls')),\n url(r'^fases_jogo/', include('app.urls.fases_jogo.urls')),\n \n \n url(r'^carga_aprendizagens/', include('app.urls.carga_aprendizagens.urls')),\n \n url(r'^geracao_relatorio/', include('app.urls.geracao_relatorio.urls')),\n \n \n\n #url(r'^login/$',\n # django.contrib.auth.views.login,\n # {\n # 'template_name': 'app/login.html',\n # 'authentication_form': app.forms.BootstrapAuthenticationForm,\n # 'extra_context':\n # {\n # 'title': 'Log in',\n # 'year': datetime.now().year,\n # }\n # },\n # name='login'),\n #url(r'^logout$',\n # django.contrib.auth.views.logout,\n # {\n # 'next_page': '/',\n # },\n # name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n]\n", "sub_path": "AvalFrameWeb/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "app.views.home.home", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.views.home", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", 
"line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "124486028", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-eeg-fsaverage-source-modeling:\n\nEEG forward operator with a template MRI\n========================================\n\nThis tutorial explains how to compute the forward operator from EEG data\nusing the standard template MRI subject ``fsaverage``.\n\n.. caution:: Source reconstruction without an individual T1 MRI from the\n subject will be less accurate. Do not over interpret\n activity locations which can be off by multiple centimeters.\n\n.. contents:: This tutorial covers:\n :local:\n :depth: 2\n\n\"\"\"\n# Authors: Alexandre Gramfort \n# Joan Massich \n#\n# License: BSD Style.\n\nimport os.path as op\n\nimport mne\nfrom mne.datasets import eegbci\nfrom mne.datasets import fetch_fsaverage\n\n# Download fsaverage files\nfs_dir = fetch_fsaverage(verbose=True)\nsubjects_dir = op.dirname(fs_dir)\n\n# The files live in:\nsubject = 'fsaverage'\ntrans = 'fsaverage' # MNE has a built-in fsaverage transformation\nsrc = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')\nbem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')\n\n##############################################################################\n# Load the data\n# -------------\n#\n# We use here EEG data from the BCI dataset.\n#\n# .. note:: See :ref:`plot_montage` to view all the standard EEG montages\n# available in MNE-Python.\n\nraw_fname, = eegbci.load_data(subject=1, runs=[6])\nraw = mne.io.read_raw_edf(raw_fname, preload=True)\n\n# Clean channel names to be able to use a standard 1005 montage\nnew_names = dict(\n (ch_name,\n ch_name.rstrip('.').upper().replace('Z', 'z').replace('FP', 'Fp'))\n for ch_name in raw.ch_names)\nraw.rename_channels(new_names)\n\n# Read and set the EEG electrode locations\nmontage = mne.channels.make_standard_montage('standard_1005')\n\nraw.set_montage(montage)\nraw.set_eeg_reference(projection=True) # needed for inverse modeling\n\n# Check that the locations of EEG electrodes is correct with respect to MRI\nmne.viz.plot_alignment(\n raw.info, src=src, eeg=['original', 'projected'], trans=trans,\n show_axes=True, mri_fiducials=True, dig='fiducials')\n\n##############################################################################\n# Setup source space and compute forward\n# --------------------------------------\n\nfwd = mne.make_forward_solution(raw.info, trans=trans, src=src,\n bem=bem, eeg=True, mindist=5.0, n_jobs=1)\nprint(fwd)\n\n# for illustration purposes use fwd to compute the sensitivity map\neeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')\neeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,\n clim=dict(lims=[5, 50, 100]))\n", "sub_path": "0.21/_downloads/41f4872bb7e7ad4ec492ad557209d3d7/plot_eeg_no_mri.py", "file_name": "plot_eeg_no_mri.py", "file_ext": "py", "file_size_in_byte": 2723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mne.datasets.fetch_fsaverage", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "name"}, {"api_name": "mne.datasets.eegbci.load_data", "line_number": 50, "usage_type": "call"}, 
{"api_name": "mne.datasets.eegbci", "line_number": 50, "usage_type": "name"}, {"api_name": "mne.io.read_raw_edf", "line_number": 51, "usage_type": "call"}, {"api_name": "mne.io", "line_number": 51, "usage_type": "attribute"}, {"api_name": "mne.channels.make_standard_montage", "line_number": 61, "usage_type": "call"}, {"api_name": "mne.channels", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mne.viz.plot_alignment", "line_number": 67, "usage_type": "call"}, {"api_name": "mne.viz", "line_number": 67, "usage_type": "attribute"}, {"api_name": "mne.make_forward_solution", "line_number": 75, "usage_type": "call"}, {"api_name": "mne.sensitivity_map", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "283079721", "text": "# Copyright (C) 2018 Apple Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging\nimport os\nimport subprocess\n\nimport ews.config as config\n\n_log = logging.getLogger(__name__)\n\n\nclass Buildbot():\n @classmethod\n def send_patch_to_buildbot(cls, patch_path, properties=[]):\n command = ['buildbot', 'try',\n '--connect=pb',\n '--master={}:{}'.format(config.BUILDBOT_SERVER_HOST, config.BUILDBOT_SERVER_PORT),\n '--username={}'.format(config.BUILDBOT_PB_USERNAME),\n '--passwd={}'.format(config.BUILDBOT_PB_PASSWORD),\n '--diff={}'.format(patch_path),\n '--repository=']\n\n for property in properties:\n command.append('--property={}'.format(property))\n\n _log.debug('Executing command: {}'.format(command))\n return_code = subprocess.call(command)\n if return_code:\n _log.warn('Error executing: {}, return code={}'.format(command, return_code))\n\n return return_code\n", "sub_path": "Tools/BuildSlaveSupport/ews-app/ews/common/buildbot.py", "file_name": "buildbot.py", "file_ext": "py", "file_size_in_byte": 2291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "ews.config.BUILDBOT_SERVER_HOST", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ews.config", "line_number": 37, "usage_type": "name"}, {"api_name": "ews.config.BUILDBOT_SERVER_PORT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ews.config.BUILDBOT_PB_USERNAME", "line_number": 38, "usage_type": "attribute"}, {"api_name": "ews.config", "line_number": 38, "usage_type": "name"}, {"api_name": "ews.config.BUILDBOT_PB_PASSWORD", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ews.config", "line_number": 39, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "371835123", "text": "#!/usr/bin/env python\nimport os\nfrom contextlib import contextmanager\nfrom portal.utils import parse_date\nfrom dotenv import load_dotenv\nfrom portal.database import db\nfrom portal.etl.database import (\n etl_import_database,\n recruit_table,\n recruit_summary_table,\n delegate_table,\n practice_table,\n practice_group_table,\n practice_groups_practices_table,\n practice_status_table,\n exclusion_reason_table,\n)\nfrom portal.models import (\n Recruit,\n RecruitSummary,\n Delegate,\n Practice,\n PracticeGroup,\n PracticeStatus,\n ExclusionReason,\n)\nfrom portal import create_app\n\n\ndef import_practice_status():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_status (\n id INT PRIMARY KEY,\n name VARCHAR(255)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE UNIQUE INDEX idx__etl_practice_status__name ON etl_practice_status(name);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_status_table.select()):\n imports.append(PracticeStatus(\n id=r['id'],\n name=r['name'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_recruits():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_recruit (\n status VARCHAR(255),\n nhs_number VARCHAR(255),\n study_id VARCHAR(255),\n practice_code VARCHAR(255),\n first_name VARCHAR(64),\n last_name VARCHAR(64),\n date_of_birth DATE,\n civicrm_contact_id INT,\n civicrm_case_id INT PRIMARY KEY,\n recruited_date DATE,\n invoice_year VARCHAR(255),\n invoice_quarter VARCHAR(255),\n reimbursed_status VARCHAR(255)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_recruit__nhs_number ON etl_recruit(nhs_number);\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_recruit__practice_code ON etl_recruit(practice_code);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(recruit_table.select()):\n imports.append(Recruit(\n status=r['status'],\n nhs_number=r['nhs_number'],\n study_id=r['study_id'],\n practice_code=r['practice_code'],\n first_name=r['first_name'],\n last_name=r['last_name'],\n date_of_birth=r['date_of_birth'],\n civicrm_contact_id=r['civicrm_contact_id'],\n civicrm_case_id=r['civicrm_case_id'],\n recruited_date=r['recruited_date'],\n invoice_year=r['invoice_year'],\n invoice_quarter=r['invoice_quarter'],\n reimbursed_status=r['reimbursed_status'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_recruit_summary():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_recruit_summary (\n practice_code VARCHAR(100),\n recruited INTEGER,\n excluded INTEGER,\n withdrawn INTEGER,\n last_recruited_date DATE,\n excluded_percentage DECIMAL(30, 4),\n withdrawn_percentage DECIMAL(30, 4)\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_recruit_summary__practice_code ON etl_recruit_summary(practice_code);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(recruit_summary_table.select()):\n imports.append(RecruitSummary(\n practice_code=r['practice_code'],\n recruited=r['recruited'],\n excluded=int(r['excluded']),\n withdrawn=int(r['withdrawn']),\n last_recruited_date=r['last_recruited_date'],\n excluded_percentage=r['excluded_percentage'],\n withdrawn_percentage=r['withdrawn_percentage'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_delegates():\n 
db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_delegate (\n practice_code VARCHAR(255),\n instance INT,\n name VARCHAR(255),\n role VARCHAR(255),\n gcp_trained BIT,\n gv_trained BIT,\n on_delegation_log_yn BIT,\n gv_start_del_log DATE,\n gv_end_del_log DATE,\n rsn_not_on_del_log VARCHAR(500),\n gv_phone_a VARCHAR(100),\n gv_phone_b VARCHAR(100),\n contact_email_add VARCHAR(100),\n primary_contact_yn BIT\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_delegates__practice_code ON etl_delegate(practice_code);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(delegate_table.select()):\n imports.append(Delegate(\n practice_code=r['practice_code'],\n instance=r['instance'],\n name=r['name'],\n role=r['role'],\n gcp_trained=r['gcp_trained'],\n gv_trained=r['gv_trained'],\n on_delegation_log_yn=r['on_delegation_log_yn'],\n gv_start_del_log=parse_date(r['gv_start_del_log']),\n gv_end_del_log=parse_date(r['gv_end_del_log']),\n gv_phone_a=r['gv_phone_a'],\n gv_phone_b=r['gv_phone_b'],\n contact_email_add=r['contact_email_add'],\n primary_contact_yn=r['primary_contact_yn'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_practices():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_detail (\n project_id INT,\n ccg INT,\n federation INT,\n code VARCHAR(255),\n name VARCHAR(255),\n street_address VARCHAR(255),\n town VARCHAR(255),\n city VARCHAR(255),\n county VARCHAR(255),\n postcode VARCHAR(255),\n partners VARCHAR(255),\n collab_ag_comp_yn BIT,\n collab_ag_signed_date_str VARCHAR(100),\n isa_comp_yn BIT,\n isa_1_signed_date_str VARCHAR(255),\n isa_1_caldicott_guard_end_str VARCHAR(255),\n agree_66_comp_yn BIT,\n agree_66_signed_date_1_str VARCHAR(255),\n agree_66_end_date_2_str VARCHAR(255),\n genvasc_initiated BIT,\n status_id INT NULL\n );\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_detail__practice_code ON etl_practice_detail(code);\n \"\"\")\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_detail__ccg ON etl_practice_detail(ccg);\n \"\"\")\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_detail__federation ON etl_practice_detail(federation);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_table.select()):\n imports.append(Practice(\n project_id=r['project_id'],\n code=r['code'],\n name=r['name'],\n ccg=r['ccg'],\n street_address=r['street_address'],\n town=r['town'],\n city=r['city'],\n county=r['county'],\n postcode=r['postcode'],\n federation=r['federation'],\n partners=r['partners'],\n collab_ag_comp_yn=r['collab_ag_comp_yn'],\n collab_ag_signed_date_str=parse_date(r['collab_ag_signed_date_str']),\n isa_comp_yn=r['isa_comp_yn'],\n isa_1_signed_date_str=parse_date(r['isa_1_signed_date_str']),\n isa_1_caldicott_guard_end_str=parse_date(r['isa_1_caldicott_guard_end_str']),\n agree_66_comp_yn=r['agree_66_comp_yn'],\n agree_66_signed_date_1_str=parse_date(r['agree_66_signed_date_1_str']),\n agree_66_end_date_2_str=parse_date(r['agree_66_end_date_2_str']),\n genvasc_initiated=r['genvasc_initiated'] in ('1', 1),\n status_id=r['status_id'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_practice_groups():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_group (\n project_id INT,\n identifier VARCHAR(255),\n type VARCHAR(255),\n name VARCHAR(255),\n PRIMARY KEY (project_id, identifier, type)\n 
);\n \"\"\")\n\n db.engine.execute(\"\"\"\n CREATE INDEX idx__etl_practice_group__type ON etl_practice_group(type);\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_group_table.select()):\n imports.append(PracticeGroup(\n project_id=r['project_id'],\n type=r['type'],\n identifier=r['identifier'],\n name=r['name'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\ndef import_practice_groups_practices():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_practice_groups_practices (\n practice_group_type VARCHAR(200),\n practice_group_project_id INT,\n practice_group_identifier INT,\n practice_code VARCHAR(255),\n PRIMARY KEY (practice_group_type, practice_group_project_id, practice_group_identifier, practice_code)\n );\n \"\"\")\n\n with etl_import_database() as r_db:\n for r in r_db.execute(practice_groups_practices_table.select()):\n try:\n p = Practice.query.filter_by(code=r['practice_code']).one()\n pg = PracticeGroup.query.filter_by(\n type=r['practice_group_type'],\n project_id=r['practice_group_project_id'],\n identifier=r['practice_group_identifier'],\n ).one()\n\n pg.practices.add(p)\n db.session.add(pg)\n except:\n print(r['practice_group_type'])\n print(r['practice_group_project_id'])\n print(r['practice_group_identifier'])\n\n db.session.commit()\n\n\ndef import_exclusion_reasons():\n db.engine.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS etl_exclusion_reason (\n civicrm_case_id INT PRIMARY KEY,\n details VARCHAR(500)\n );\n \"\"\")\n\n imports = []\n\n with etl_import_database() as r_db:\n for r in r_db.execute(exclusion_reason_table.select()):\n imports.append(ExclusionReason(\n civicrm_case_id=r['civicrm_case_id'],\n details=r['details'],\n ))\n\n db.session.add_all(imports)\n db.session.flush()\n\n db.session.commit()\n\n\n# Load environment variables from '.env' file.\nload_dotenv()\n\napp = create_app()\ncontext = app.app_context()\ncontext.push()\n\nimport_practice_status()\nimport_practice_groups()\nimport_practices()\nimport_recruits()\nimport_recruit_summary()\nimport_delegates()\nimport_practice_groups_practices()\nimport_exclusion_reasons()\n\ncontext.pop()\n", "sub_path": "dev_import.py", "file_name": "dev_import.py", "file_ext": "py", "file_size_in_byte": 11395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "portal.database.db.engine.execute", "line_number": 31, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 31, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 31, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 38, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 38, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 38, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 44, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_status_table.select", "line_number": 45, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_status_table", "line_number": 45, "usage_type": "name"}, {"api_name": "portal.models.PracticeStatus", "line_number": 46, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 51, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 51, "usage_type": "attribute"}, {"api_name": 
"portal.database.db", "line_number": 51, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 52, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 52, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 52, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 54, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 54, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 58, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 58, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 58, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 76, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 76, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 76, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 80, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 80, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 80, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 86, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_table.select", "line_number": 87, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_table", "line_number": 87, "usage_type": "name"}, {"api_name": "portal.models.Recruit", "line_number": 88, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 104, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 104, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 104, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 105, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 105, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 105, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 107, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 107, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 107, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 111, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 111, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 111, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 123, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 123, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 123, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 129, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_summary_table.select", "line_number": 130, "usage_type": "call"}, {"api_name": "portal.etl.database.recruit_summary_table", "line_number": 130, "usage_type": "name"}, {"api_name": "portal.models.RecruitSummary", "line_number": 131, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 141, "usage_type": "call"}, {"api_name": 
"portal.database.db.session", "line_number": 141, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 141, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 142, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 142, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 142, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 144, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 144, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 144, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 148, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 148, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 148, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 167, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 167, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 167, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 173, "usage_type": "call"}, {"api_name": "portal.etl.database.delegate_table.select", "line_number": 174, "usage_type": "call"}, {"api_name": "portal.etl.database.delegate_table", "line_number": 174, "usage_type": "name"}, {"api_name": "portal.models.Delegate", "line_number": 175, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 183, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 184, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 191, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 191, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 191, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 192, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 192, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 192, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 194, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 194, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 194, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 198, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 198, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 198, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 224, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 224, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 224, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 227, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 227, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 227, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 230, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 230, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 230, 
"usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 236, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_table.select", "line_number": 237, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_table", "line_number": 237, "usage_type": "name"}, {"api_name": "portal.models.Practice", "line_number": 238, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 251, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 253, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 254, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 256, "usage_type": "call"}, {"api_name": "portal.utils.parse_date", "line_number": 257, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 262, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 262, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 262, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 263, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 263, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 263, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 265, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 265, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 265, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 269, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 269, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 269, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 279, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 279, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 279, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 285, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_group_table.select", "line_number": 286, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_group_table", "line_number": 286, "usage_type": "name"}, {"api_name": "portal.models.PracticeGroup", "line_number": 287, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 294, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 294, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 294, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 295, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 295, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 295, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 297, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 297, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 297, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 301, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 301, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 301, "usage_type": 
"name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 311, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_groups_practices_table.select", "line_number": 312, "usage_type": "call"}, {"api_name": "portal.etl.database.practice_groups_practices_table", "line_number": 312, "usage_type": "name"}, {"api_name": "portal.models.Practice.query.filter_by", "line_number": 314, "usage_type": "call"}, {"api_name": "portal.models.Practice.query", "line_number": 314, "usage_type": "attribute"}, {"api_name": "portal.models.Practice", "line_number": 314, "usage_type": "name"}, {"api_name": "portal.models.PracticeGroup.query.filter_by", "line_number": 315, "usage_type": "call"}, {"api_name": "portal.models.PracticeGroup.query", "line_number": 315, "usage_type": "attribute"}, {"api_name": "portal.models.PracticeGroup", "line_number": 315, "usage_type": "name"}, {"api_name": "portal.database.db.session.add", "line_number": 322, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 322, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 322, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 328, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 328, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 328, "usage_type": "name"}, {"api_name": "portal.database.db.engine.execute", "line_number": 332, "usage_type": "call"}, {"api_name": "portal.database.db.engine", "line_number": 332, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 332, "usage_type": "name"}, {"api_name": "portal.etl.database.etl_import_database", "line_number": 341, "usage_type": "call"}, {"api_name": "portal.etl.database.exclusion_reason_table.select", "line_number": 342, "usage_type": "call"}, {"api_name": "portal.etl.database.exclusion_reason_table", "line_number": 342, "usage_type": "name"}, {"api_name": "portal.models.ExclusionReason", "line_number": 343, "usage_type": "call"}, {"api_name": "portal.database.db.session.add_all", "line_number": 348, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 348, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 348, "usage_type": "name"}, {"api_name": "portal.database.db.session.flush", "line_number": 349, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 349, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 349, "usage_type": "name"}, {"api_name": "portal.database.db.session.commit", "line_number": 351, "usage_type": "call"}, {"api_name": "portal.database.db.session", "line_number": 351, "usage_type": "attribute"}, {"api_name": "portal.database.db", "line_number": 351, "usage_type": "name"}, {"api_name": "dotenv.load_dotenv", "line_number": 355, "usage_type": "call"}, {"api_name": "portal.create_app", "line_number": 357, "usage_type": "call"}]}
+{"seq_id": "284114919", "text": "\"\"\"andapp URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom webapp.views import comment, message_user, message_customer, friend_delete, friend_add, busket, busket_delete, dream, dream_delete, like, logout, check_user, insert_user, index_main_content, index_customer_main, index_main, index_registration, index_search, index_profile, index_busket, index_dream, index_store, index_about, customer_main, customer_profile, customer_goods, customer_messages, customer_statistics, customer_main_content, customer_main_content_edit, insert_customer, check_customer, insert_product, update_product\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', index_main),\n url(r'^index_main/$', index_main),\n url(r'^registration/$', index_registration),\n url(r'^search/$', index_search),\n url(r'^logout/$', logout),\n\n url(r'^index_main_content/$', index_main_content),\n url(r'^index_customer_main/$', index_customer_main),\n url(r'^profile/$', index_profile),\n url(r'^basket/$', index_busket), \n url(r'^dream/$', index_dream),\n url(r'^store/$', index_store), \n url(r'^about/$', index_about), \n url(r'^insert_user/$', insert_user),\n url(r'^check_user/$', check_user),\n url(r'^like/$', like),\n url(r'^dream_insert/$', dream),\n url(r'^dream_delete/$', dream_delete),\n url(r'^busket/$', busket), \n url(r'^busket_delete/$', busket_delete),\n url(r'^friend_add/$', friend_add),\n url(r'^friend_delete/$', friend_delete), \n url(r'^message_user/$', message_user),\n url(r'^message_customer/$', message_customer),\n url(r'^comment/$', comment), \n \n \n\n url(r'^customer_main/$', customer_main),\n url(r'^customer_profile/$', customer_profile),\n url(r'^customer_goods/$', customer_goods),\n url(r'^customer_messages/$', customer_messages),\n url(r'^customer_statistics/$', customer_statistics),\n \n url(r'^insert_customer/$', insert_customer),\n url(r'^check_customer/$', check_customer),\n url(r'^update_product/$', update_product),\n url(r'^insert_product/$', insert_product),\n\n url(r'^customer_main_content/$', customer_main_content),\n url(r'^customer_main_content_edit/$', customer_main_content_edit),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "andapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2935, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "webapp.views.index_main", 
"line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "webapp.views.index_main", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "webapp.views.index_registration", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "webapp.views.index_search", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "webapp.views.logout", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "webapp.views.index_main_content", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "webapp.views.index_customer_main", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "webapp.views.index_profile", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "webapp.views.index_busket", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "webapp.views.index_dream", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "webapp.views.index_store", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "webapp.views.index_about", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "webapp.views.insert_user", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "webapp.views.check_user", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "webapp.views.like", "line_number": 38, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "webapp.views.dream", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "webapp.views.dream_delete", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "webapp.views.busket", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 42, "usage_type": "call"}, {"api_name": "webapp.views.busket_delete", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "webapp.views.friend_add", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "webapp.views.friend_delete", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": "webapp.views.message_user", "line_number": 45, "usage_type": "argument"}, {"api_name": 
"django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "webapp.views.message_customer", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "webapp.views.comment", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "webapp.views.customer_main", "line_number": 51, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 52, "usage_type": "call"}, {"api_name": "webapp.views.customer_profile", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}, {"api_name": "webapp.views.customer_goods", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 54, "usage_type": "call"}, {"api_name": "webapp.views.customer_messages", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 55, "usage_type": "call"}, {"api_name": "webapp.views.customer_statistics", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 57, "usage_type": "call"}, {"api_name": "webapp.views.insert_customer", "line_number": 57, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 58, "usage_type": "call"}, {"api_name": "webapp.views.check_customer", "line_number": 58, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 59, "usage_type": "call"}, {"api_name": "webapp.views.update_product", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 60, "usage_type": "call"}, {"api_name": "webapp.views.insert_product", "line_number": 60, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 62, "usage_type": "call"}, {"api_name": "webapp.views.customer_main_content", "line_number": 62, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 63, "usage_type": "call"}, {"api_name": "webapp.views.customer_main_content_edit", "line_number": 63, "usage_type": "argument"}, {"api_name": "django.conf.urls.static.static", "line_number": 65, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 65, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 65, "usage_type": "attribute"}]}
+{"seq_id": "582642831", "text": "\"\"\"\nGeneral purpose helper functions.\n\"\"\"\nimport os\nfrom configparser import RawConfigParser\nfrom pathlib import Path\nfrom timeit import default_timer as timer\n\n\ndef get_config():\n \"\"\"\n Looks for a config file using the APP_ENV environment\n variable and reads in the configuration as a dict.\n :return: dict(cfg dict, root, cfg_path)\n \"\"\"\n # set root directory for the app (this directory, that is)\n root = Path.cwd()\n\n # setup configuration file path using the APP_ENV environment variable\n cfg_path = root / 'config' / '{}.ini'.format(os.environ.get('APP_ENV'))\n cfg_parser = RawConfigParser()\n\n # read .ini file for the appropriate app setup (dev, prod or test)\n cfg_parser.read(cfg_path)\n\n # create a dict with the config\n cfg_dict = {x: dict(cfg_parser.items(x)) for x in cfg_parser.sections()}\n return {\"cfg\": cfg_dict, \"root\": root, \"cfg_path\": cfg_path}\n\n\ndef time_func_perf(func, func_args=None, func_kwargs=None) -> float:\n \"\"\"\n Return the time elapsed between start and end, calling a func in\n between them.\n :param func: function to be called\n :param func_args: arguments to be passed to the function\n :param func_kwargs: keyword arguments to passed to the function\n :return: time in fractional seconds\n \"\"\"\n if func_args and func_kwargs:\n start = timer()\n func(*func_args, **func_kwargs)\n stop = timer()\n return stop - start\n\n if func_args and not func_kwargs:\n start = timer()\n func(*func_args)\n stop = timer()\n return stop - start\n\n if func_kwargs and not func_args:\n start = timer()\n func(**func_kwargs)\n stop = timer()\n return stop - start\n\n if not func_args and not func_kwargs:\n start = timer()\n func()\n stop = timer()\n return stop - start\n", "sub_path": "helpers/general.py", "file_name": "general.py", "file_ext": "py", "file_size_in_byte": 1851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pathlib.Path.cwd", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "configparser.RawConfigParser", "line_number": 21, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 41, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 43, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 47, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 49, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 53, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 55, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 59, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 61, "usage_type": "call"}]}
+{"seq_id": "42454460", "text": "import os\n\nfrom gym.envs.registration import register\nfrom collections import OrderedDict\nfrom minerl.env import spaces\nfrom minerl.env.core import MineRLEnv\n\nimport numpy as np\nmissions_dir = os.path.dirname(__file__)\nnavigate_observation_space = spaces.Dict({\n 'pov': spaces.Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),\n 'inventory': spaces.Dict({\n 'dirt': spaces.Box(low=0, high=2304, shape=(), dtype=np.int)\n }),\n 'compassAngle': spaces.Box(low=-179.0, high=180.0, shape=(), dtype=np.float32)\n})\n\nnavigate_action_space = spaces.Dict({\n \"forward\": spaces.Discrete(2),\n \"back\": spaces.Discrete(2),\n \"left\": spaces.Discrete(2),\n \"right\": spaces.Discrete(2),\n \"jump\": spaces.Discrete(2),\n \"sneak\": spaces.Discrete(2),\n \"sprint\": spaces.Discrete(2),\n \"attack\": spaces.Discrete(2),\n \"camera\": spaces.Box(low=-180, high=180, shape=(2,), dtype=np.float32),\n \"place\": spaces.Enum('none', 'dirt')})\n\n\nregister(\n id='MineRLSimple-v0',\n entry_point='minerl.env:MineRLEnv',\n kwargs={\n 'xml': os.path.join(missions_dir, 'navigationDenseFixedMap.xml'),\n 'observation_space': navigate_observation_space,\n 'action_space': navigate_action_space,\n },\n max_episode_steps=600,\n)\n", "sub_path": "SimpleEnvironment/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Dict", "line_number": 10, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 10, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Box", "line_number": 11, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 11, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Dict", "line_number": 12, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 12, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Box", "line_number": 13, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.int", "line_number": 13, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Box", "line_number": 15, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Dict", "line_number": 18, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 18, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 19, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 19, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 20, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 20, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 21, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 21, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 22, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 22, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 23, "usage_type": "call"}, {"api_name": 
"minerl.env.spaces", "line_number": 23, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 24, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 24, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 25, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 25, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Discrete", "line_number": 26, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 26, "usage_type": "name"}, {"api_name": "minerl.env.spaces.Box", "line_number": 27, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "minerl.env.spaces.Enum", "line_number": 28, "usage_type": "call"}, {"api_name": "minerl.env.spaces", "line_number": 28, "usage_type": "name"}, {"api_name": "gym.envs.registration.register", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}]}
+{"seq_id": "198440922", "text": "\"\"\"\n代码链接:https://github.com/ksivaman/Transfer-image-styling\n代码详解链接:https://youyou-tech.com/2019/10/01/%E4%BB%A3%E7%A0%81%E8%AF%A6%E8%A7%A3%EF%BC%9A%E5%9C%A8Pytorch%E5%92%8CPython/\n\"\"\"\n\n\n\"\"\"第一步:涵盖所有必要的库\"\"\"\nfrom PIL import Image\nfrom io import BytesIO\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nimport torch\nimport torch.optim as optim\nimport requests\nfrom torchvision import transforms, models\n\n\"\"\"第二步:因为将不会对网络进行训练,在Pytorch中初始化预训练的VGG19模型并冻结所有模型参数,如果NVIDIA GPUs可用,移动模型到cuda。\"\"\"\n\nstrt = time.clock()\n# get the \"features\" portion of VGG19\nvgg = models.vgg19(pretrained=True).features\n\n# freeze VGG params to avoid chanhe\nfor param in vgg.parameters():\n param.requires_grad_(False)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nvgg.to(device)\n\n\ndef load_image(img_path, max_size=400, shape=None):\n ''' Load in and transform an image, making sure the image\n is <= 400 pixels in the x-y dims.'''\n if \"http\" in img_path:\n response = requests.get(img_path)\n image = Image.open(BytesIO(response.content)).convert('RGB')\n else:\n image = Image.open(img_path).convert('RGB')\n \n # large images will slow down processing\n if max(image.size) > max_size:\n size = max_size\n else:\n size = max(image.size)\n \n if shape is not None:\n size = shape\n\n in_transform = transforms.Compose([\n transforms.Resize(size),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n\n # discard the transparent, alpha channel (that's the :3) and add the batch dimension\n image = in_transform(image)[:3,:,:].unsqueeze(0)\n return image\n\n# load in content and style image\n# content = load_image('imgs/tanya_deepak.jpg').to(device)\ncontent = load_image(r'F:\\\\PyStyle\\\\Transfer-image-styling\\\\imgs\\\\cat_small.jpg').to(device)\n\n# Resize style to match content, makes code easier\n# style = load_image('imgs/cat_small_abstract.jpg', shape=content.shape[-2:]).to(device)\nstyle = load_image(r'F:\\\\PyStyle\\\\Transfer-image-styling\\imgs\\\\tanya_deepak_the_scream.jpg', shape=content.shape[-2:]).to(device)\n\n\ndef im_convert(tensor):\n \"\"\"\n Display a tensor as an image.\n 将张量转换为图像\n \"\"\"\n \n image = tensor.to(\"cpu\").clone().detach()\n image = image.numpy().squeeze()\n image = image.transpose(1,2,0)\n image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))\n image = image.clip(0, 1)\n\n return image\n\n\n\"\"\"第三步:定义一个函数以从VGG19网络中提取特征。图层字典中的图层名称是PyTorch预培训的VGG19模型中的预定义名称。\"\"\"\ndef get_features(image, model, layers=None):\n \"\"\" Run an image forward through a model and get the features for \n a set of layers. 
Default layers are for VGGNet matching Gatys et al (2016)\n\n Run an image forward through the model and collect the feature maps for the given layers; the default layers match Gatys et al. for VGGNet.\n \"\"\"\n \n ## Need the layers for the content and style representations of an image\n if layers is None:\n layers = {'0': 'conv1_1',\n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1',\n '21': 'conv4_2', ## content representation\n '28': 'conv5_1'}\n \n features = {}\n x = image\n # model._modules is a dictionary holding each module in the model\n for name, layer in model._modules.items():\n x = layer(x)\n if name in layers:\n features[layers[name]] = x\n \n return features\n\n\n\"\"\"Step 4: Given a feature map as a tensor, define a function to compute the Gram matrix.\"\"\"\ndef gram_matrix(tensor):\n\n # get the batch_size, depth, height, width of the tensor\n _, d, h, w = tensor.size()\n \n # reshape so we're multiplying the features for each channel\n tensor = tensor.view(d, h * w)\n \n # calculate the gram matrix\n gram = torch.mm(tensor, tensor.t())\n \n return gram\n\n\"\"\"Step 5: Get the features of the style and content images, compute the Gram matrices for the style loss, initialize the target image as a copy of the content image, set per-layer style weights for the linear combination of the five Gram-matrix MSE losses, set the content weight and style weight for the relative importance of the two losses ('a' in the style-loss figure above), choose the optimizer for backpropagation, and set the number of steps for iterating on and updating the target image.\"\"\"\n# get content and style features only once before training\ncontent_features = get_features(content, vgg)\nstyle_features = get_features(style, vgg)\n\n# calculate the gram matrices for each layer of our style representation\nstyle_grams = {layer: gram_matrix(style_features[layer]) for layer in style_features}\n\n# create a third \"target\" image and prep it for change\n# it is a good idea to start off with the target as a copy of our *content* image\n# then iteratively change its style\ntarget = content.clone().requires_grad_(True).to(device)\n\nstyle_weights = {'conv1_1': 1.,\n 'conv2_1': 0.75,\n 'conv3_1': 0.2,\n 'conv4_1': 0.2,\n 'conv5_1': 0.2}\n\ncontent_weight = 1 # alpha\nstyle_weight = 1e6 # beta\n\n# iteration hyperparameters\noptimizer = optim.Adam([target], lr=0.003)\nsteps = 200 # decide how many iterations to update your image (5000)\n\n\n\"\"\"Step 6: Iteratively update the target image while keeping the total loss minimal. Reduce the steps value to shorten the run.\"\"\"\n\nfrom tqdm import tqdm\nfor ii in tqdm(range(1, steps+1)):\n \n # get the features from your target image\n target_features = get_features(target, vgg)\n \n # the content loss\n content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)\n \n # the style loss\n # initialize the style loss to 0\n style_loss = 0\n # then add to it for each layer's gram matrix loss\n for layer in style_weights:\n # get the \"target\" style representation for the layer\n target_feature = target_features[layer]\n target_gram = gram_matrix(target_feature)\n _, d, h, w = target_feature.shape\n # get the \"style\" style representation\n style_gram = style_grams[layer]\n # the style loss for one layer, weighted appropriately\n layer_style_loss = style_weights[layer] * torch.mean((target_gram - style_gram)**2)\n # add to the style loss\n style_loss += layer_style_loss / (d * h * w)\n \n # calculate the *total* loss\n total_loss = content_weight * content_loss + style_weight * style_loss\n \n # update your target image\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n# the final image\nfinal = im_convert(target)\n# save it\nmatplotlib.image.imsave('F:\\\\PyStyle\\\\Transfer-image-styling\\\\imgs\\\\cat_style.jpg', final)\n\nend = time.perf_counter()\n\nprint(\"Elapsed time: %d seconds\"%(end-strt))\n", "sub_path": "transfer.py", "file_name": "transfer.py", "file_ext": "py", "file_size_in_byte": 7113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "time.perf_counter", 
"line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.models.vgg19", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 52, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 53, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 53, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 55, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 151, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.image.imsave", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 193, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 195, "usage_type": "call"}]}
+{"seq_id": "486614595", "text": "from __future__ import absolute_import\n\nimport logging\nimport threading\nfrom rainbow_logging_handler import RainbowLoggingHandler\nimport sys\nfrom docker_conduct.util import synchronized\n\n__author__ = 'nick'\n\n\nclass LevelRangeFilter(logging.Filter):\n\t\"\"\"Specify log level range to accept\"\"\"\n\tdef __init__(self, low, high):\n\t\tsuper(LevelRangeFilter, self).__init__()\n\n\t\tself.low = low\n\t\tself.high = high\n\n\tdef filter(self, record):\n\t\treturn self.low <= record.levelno <= self.high\n\n\n__CONFIGURED_LOGGERS = set()\n\n@synchronized\ndef configure_basic_logging(logger):\n\t\"\"\"\n\tIdempotent and thread-safe basic logging configuration for provided logger object\n\n\tReturns True if logger was configured, False if logger had been previously configured\n\t\"\"\"\n\tif logger not in __CONFIGURED_LOGGERS:\n\t\t__CONFIGURED_LOGGERS.add(logger)\n\n\t\tformatter = logging.Formatter('%(message)s')\n\n\t\tstdout_handler = RainbowLoggingHandler(stream=sys.stdout)\n\t\tstderr_handler = RainbowLoggingHandler(stream=sys.stderr)\n\n\t\tstdout_handler.setFormatter(formatter)\n\t\tstderr_handler.setFormatter(formatter)\n\n\t\tstdout_filter = LevelRangeFilter(logging.DEBUG, logging.INFO)\n\t\tstderr_filter = LevelRangeFilter(logging.WARNING, logging.CRITICAL)\n\n\t\tstdout_handler.addFilter(stdout_filter)\n\t\tstderr_handler.addFilter(stderr_filter)\n\n\t\tstdout_handler.setLevel(logging.DEBUG)\n\t\tstderr_handler.setLevel(logging.DEBUG)\n\n\t\tlogger.addHandler(stdout_handler)\n\t\tlogger.addHandler(stderr_handler)\n\n\t\treturn True\n\n\telse:\n\t\treturn False\n\n\nclass LoggingMixin(object):\n\t\"\"\"Mixin to provide a single preconfigured logger with sensible defaults on class instances\"\"\"\n\n\t__logger = None\n\n\tauto_configure_basic_logging = True\n\tlog_level = logging.DEBUG\n\n\t@property\n\tdef logger(self):\n\t\t\"\"\"Load, cache, and return a logger object. 
By default, also performs basic configuration on the logger\"\"\"\n\t\tif self.__logger is None:\n\t\t\tself.__logger = self.get_logger()\n\t\t\tself._configure_logging(self.__logger)\n\t\treturn self.__logger\n\n\t@classmethod\n\tdef _configure_logging(cls, logger):\n\t\t\"\"\"Hook to override logging configuration\"\"\"\n\t\tlogger.setLevel(cls.log_level)\n\n\t\tif cls.auto_configure_basic_logging:\n\t\t\tconfigure_basic_logging(logger)\n\n\tdef get_logger(self):\n\t\t\"\"\"Hook to override how the logger is instantiated\"\"\"\n\t\treturn logging.getLogger(self.__module__)\n", "sub_path": "docker_conduct/logging.py", "file_name": "logging.py", "file_ext": "py", "file_size_in_byte": 2295, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.Filter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 36, "usage_type": "call"}, {"api_name": "rainbow_logging_handler.RainbowLoggingHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rainbow_logging_handler.RainbowLoggingHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 50, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 51, "usage_type": "attribute"}, {"api_name": "docker_conduct.util.synchronized", "line_number": 26, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 88, "usage_type": "call"}]}
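+A usage sketch for the LoggingMixin above; the Worker class is hypothetical, and the import path comes from the record's sub_path.
+
+    from docker_conduct.logging import LoggingMixin
+
+    class Worker(LoggingMixin):  # hypothetical consumer
+        def run(self):
+            self.logger.info("work started")  # routed to stdout (DEBUG..INFO filter)
+            self.logger.warning("low disk")   # routed to stderr (WARNING..CRITICAL filter)
+
+    Worker().run()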
+{"seq_id": "197143379", "text": "import numpy as np\nfrom numpy.random import randn, random, standard_normal\nimport matplotlib.pyplot as plt\nimport logging\nimport uuid\nfrom baselines.constants import *\n\nclass Channel:\n def __init__(self, channel_type, fading=0, rate=None, op_freq=None):\n self.uuid = uuid.uuid4()\n self.channel_type = channel_type\n # self.bw = []\n # self.max_coverage = []\n self.fading = fading\n # self.awgn = awgn\n\n if not rate:\n if channel_type==LTE:\n self.up = 75*MBPS\n self.down = 300*MBPS\n self.op_freq = 2.6*GHZ\n elif channel_type==WIFI1:\n self.up = 135*MBPS\n self.down = 135*MBPS\n self.op_freq = 2.4*GHZ\n elif channel_type==WIFI2:\n self.up = 135*MBPS\n self.down = 135*MBPS\n self.op_freq = 5*GHZ\n elif channel_type==BT:\n self.up = 22*MBPS\n self.down = 22*MBPS\n self.op_freq = 2.4*GHZ\n elif channel_type==NFC:\n self.up = 212*KBPS\n self.down = 212*KBPS\n self.op_freq = 13.56*MHZ\n elif channel_type==NFC:\n self.up = 212*KBPS\n self.down = 212*KBPS\n self.op_freq = 13.56*MHZ\n else: # channel_type==WIRED:\n self.up = 0.02*GBPS\n self.down = 0.02*GBPS\n else:\n self.up = rate[0]\n self.down = rate[1]\n self.op_freq = op_freq\n\n def get_uuid(self):\n return self.uuid.hex\n\n def get_channel_type(self):\n return self.channel_type\n\n def get_rate(self, is_up=True, dist=0):\n # noises = 0\n gain = 1\n if is_up:\n mean_rate = self.up\n else:\n mean_rate = self.down\n\n if self.fading and self.channel_type!=WIRED:\n gain *= 1 + standard_normal()*np.sqrt(self.fading)\n # return np.random.rayleigh( np.sqrt(2/np.pi)*mean_rate )\n return mean_rate*gain\n\ndef main():\n import pdb; pdb.set_trace()\n\nif __name__=='__main__':\n main()\n", "sub_path": "MECS_gym/baselines/channels.py", "file_name": "channels.py", "file_ext": "py", "file_size_in_byte": 2173, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "180215726", "text": "import pickle\r\nimport pandas as pd\r\nimport urllib.request\r\nimport json\r\nimport ast\r\nimport streamlit as st\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\n\r\nwith open('framingham_classifier_Logistic_regression_new.pkl', 'rb') as f:\r\n model = pickle.load(f)\r\n\r\n\r\ndef main():\r\n st.title(\" Disease Predictor\")\r\n st.sidebar.header('Patient Details')\r\n age = st.sidebar.number_input(\"Age (years)\", 0, 200, 49)\r\n sysBP = st.sidebar.number_input(\"systolic blood pressure(mmHg)\", 0.0, 500.0, 132.0)\r\n diaBP = st.sidebar.number_input(\"diastolic blood pressure(mmHg)\", 0, 250, 82)\r\n glucose = st.sidebar.number_input(\"glucose level\", 0.0, 1000.0, 81.0)\r\n # diabetes = st.sidebar.number_input('diabetes', 0, 200, 2)\r\n option = st.sidebar.selectbox('Gender', ('Male', 'Female'))\r\n if option == 'Male':\r\n male = 1.0\r\n elif option == 'Female':\r\n male = 0.0\r\n\r\n option2 = st.sidebar.selectbox('Blood Pressure medications', ('Yes', 'No'))\r\n if option2 == 'Yes':\r\n BPMeds = 1.0\r\n elif option2 == 'No':\r\n BPMeds = 0.0\r\n\r\n totChol = st.sidebar.number_input(\"total cholesterol level(mg/dL)\", 0.0, 1000.0, 236.0)\r\n BMI = st.sidebar.number_input(\"BMI(Body Mass Index )\", 0.0, 100.0, 25.0)\r\n option3 = st.sidebar.selectbox('prevalentStroke', ('Yes', 'No'))\r\n if option3 == 'Yes':\r\n prevalentStroke = 1.0\r\n elif option3 == 'No':\r\n prevalentStroke = 0.0\r\n\r\n option4 = st.sidebar.selectbox('prevalentHyp', ('Yes', 'No'))\r\n if option4 == 'Yes':\r\n prevalentHyp = 1.0\r\n elif option4 == 'No':\r\n prevalentHyp = 0.0\r\n\r\n pregnantNo = st.sidebar.number_input(\"pregnant No\", 0, 200, 0)\r\n plasmaGlucoseConc = st.sidebar.number_input(\"Plasma Glucose Concentration\", 0, 500, 120)\r\n tricepsThickness = st.sidebar.number_input(\"Tricep Thickness\", 0, 200, 20)\r\n SerumInsulin = st.sidebar.number_input(\"Serum Insulin\", 0, 20000, 79)\r\n diabPedigreeFunc = st.sidebar.number_input(\"Diabetic Pedigree Function\", 0.001, 100.0, 1.0)\r\n\r\n data1 = {\r\n \"Inputs\": {\r\n \"input1\":\r\n [\r\n {\r\n 'Number of times pregnant': pregnantNo,\r\n 'Plasma glucose concentration a 2 hours in an oral glucose tolerance test': plasmaGlucoseConc,\r\n 'Diastolic blood pressure (mm Hg)': diaBP,\r\n 'Triceps skin fold thickness (mm)': tricepsThickness,\r\n '2-Hour serum insulin (mu U/ml)': SerumInsulin,\r\n 'Body mass index (weight in kg/(height in m)^2)': BMI,\r\n 'Diabetes pedigree function': diabPedigreeFunc,\r\n 'Age (years)': age,\r\n 'Class variable (0 or 1)': \"0\",\r\n }\r\n ],\r\n },\r\n \"GlobalParameters\": {}\r\n }\r\n body = str.encode(json.dumps(data1))\r\n\r\n url = 'https://ussouthcentral.services.azureml.net/workspaces/13c077d4051e4e1088654297b2bbcb04/services/934466005a2243948e5d6b46d9cdec64/execute?api-version=2.0&format=swagger'\r\n api_key = 'u4bfO9QM3gPLQ4nbSXiFNXP/h4B3yO0QE1lQy0/GOSqPwgOTFwAyWr4WXEYKj4tfrvZ/mIvRZpH2b5bn9QxHgg==' # Replace this with the API key for the web service\r\n headers = {'Content-Type': 'application/json', 'Authorization': ('Bearer ' + api_key)}\r\n\r\n req = urllib.request.Request(url, body, headers)\r\n\r\n try:\r\n response = urllib.request.urlopen(req)\r\n result = response.read()\r\n my_json = result.decode('utf8').replace(\"'\", '\"')\r\n data = json.loads(my_json)\r\n s = json.dumps(data, indent=4, sort_keys=True)\r\n FinalData = data[\"Results\"]['output1']\r\n res = str(FinalData)[1:-1]\r\n json_data = ast.literal_eval(res)\r\n FinalOutputAzure = 
json_data[\"Scored Labels\"]\r\n NewDiabetesColumn = json_data[\"Scored Labels\"]\r\n\r\n except urllib.error.HTTPError as error:\r\n print(\"The request failed with status code: \" + str(error.code))\r\n # Print the headers - they include the requert ID and the timestamp, which are useful for debugging the failure\r\n print(error.info())\r\n print(json.loads(error.read().decode(\"utf8\", 'ignore')))\r\n\r\n input_variables = pd.DataFrame(\r\n [[age, sysBP, diaBP, glucose, NewDiabetesColumn, male, BPMeds, totChol, BMI, prevalentStroke, prevalentHyp]],\r\n columns=['age', 'sysBP', 'diaBP', 'glucose', 'diabetes', 'male', 'BPMeds', 'totChol', 'BMI',\r\n 'prevalentStroke', 'prevalentHyp'],\r\n dtype=float)\r\n result2 = \"\"\r\n\r\n azureresult = int(FinalOutputAzure)\r\n\r\n # if st.sidebar.button(\"Predict\"):\r\n result2 = model.predict(input_variables)[0]\r\n if result2 == 1:\r\n result2 = 'Positive'\r\n elif result2 == 0:\r\n result2 = 'Negative'\r\n\r\n if azureresult == 1:\r\n azureresult = 'Positive'\r\n elif azureresult == 0:\r\n azureresult = 'Negative'\r\n\r\n st.subheader(\"Predicted result for Coronary Heart Diseases in next 10 years:\")\r\n st.success(result2)\r\n\r\n st.subheader(\"Predicted result for diabetes from AzureML\")\r\n st.success(azureresult)\r\n\r\n heart_raw = pd.read_csv('Preprocessed_framingham.csv')\r\n heart_pro = heart_raw.drop(columns=['TenYearCHD'])\r\n df = pd.DataFrame(heart_pro)\r\n\r\n normal_up = [295, 142.5, 394, 696, 56.8, 199, 99, 846, 2.42]\r\n normal_down = [83.5, 48, 40, 107, 15.54, 0, 0,0, 0.078]\r\n current = [sysBP, diaBP, glucose, totChol, BMI, plasmaGlucoseConc, tricepsThickness,\r\n SerumInsulin, diabPedigreeFunc]\r\n\r\n names = ['sysBP', 'diaBP', 'glucose', 'totChol', 'BMI', 'plasmaGlucoseConc',\r\n 'tricepsThickness',\r\n 'SerumInsulin', 'diabPedigreeFunc']\r\n\r\n li = [normal_up, normal_down, current]\r\n chart_data = pd.DataFrame({'Upper Limit': normal_up,\r\n 'Lower Limit': normal_down,\r\n 'Current Position': current})\r\n\r\n st.subheader('')\r\n\r\n fig = go.Figure(data=[\r\n go.Bar(name='Upper Limit', x=names, y=normal_up),\r\n go.Bar(name='Lower Limit', x=names, y=normal_down),\r\n go.Bar(name='Current Position', x=names, y=current)])\r\n fig.update_layout(title={\r\n 'text': \"Range of Safty \",\r\n 'y': 0.9,\r\n 'x': 0.4,\r\n 'xanchor': 'center',\r\n 'yanchor': 'top'}, font=dict(\r\n family=\"Courier New, monospace\",\r\n size=13,\r\n color=\"black\"\r\n ))\r\n st.plotly_chart(fig)\r\n\r\n st.title('Data Distribution')\r\n\r\n df1 = df.head(400)\r\n fig = px.scatter(df1, x=\"totChol\", y=\"age\",\r\n size=\"heartRate\", color=\"glucose\",\r\n hover_name=\"age\", log_x=True, size_max=30)\r\n st.plotly_chart(fig)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6826, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pickle.load", "line_number": 11, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.sidebar.header", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 16, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 17, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 18, "usage_type": 
"call"}, {"api_name": "streamlit.sidebar", "line_number": 18, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 19, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 20, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 22, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 28, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 34, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 36, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 42, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 48, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 48, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 49, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 50, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 51, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.number_input", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 52, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 79, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 79, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 79, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 82, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 82, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 82, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 85, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 89, "usage_type": "call"}, {"api_name": "urllib.request.error", "line_number": 93, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 93, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 99, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 121, "usage_type": "call"}, {"api_name": 
"streamlit.subheader", "line_number": 123, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 140, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 144, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 146, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 146, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 147, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 147, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 148, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 148, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 149, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 149, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 160, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 162, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 165, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 165, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 168, "usage_type": "call"}]}
+{"seq_id": "543223032", "text": "\"\"\"magazine_3_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.get_index.as_view(), name='index'),\n path('author/', views.get_author, name='author'),\n path('article/', views.get_single, name='article'),\n path('topic/', views.get_category, name='topic'),\n path('login', views.get_login, name='login'),\n path('logout', views.get_logout, name='logout'),\n path('create', views.get_create, name='create'),\n path('profile', views.get_profile, name='profile'),\n path('update/', views.get_update, name='edit'),\n path('del/', views.get_delete, name='del'),\n path('register', views.get_register, name='register'),\n path('topics', views.get_topics, name='category'),\n path('create/topic', views.create_topics, name='create_topic'),\n path('delete/topic/', views.delete_topics, name='del_topic'),\n path('edit/topic/', views.update_topics, name='edit_topic'),\n]\n", "sub_path": "magazine/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "433931448", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom mock import Mock, patch\nfrom datetime import datetime\nimport pytz\n\nfrom django.test import TestCase\n\nimport pytest\n\nimport requests\nfrom requests import ConnectionError, Timeout, RequestException\n\nfrom feeds.models import Feed\nfrom feeds.tasks import update_feed, get_feed_info\n\n\nclass TestUpdateFeed(TestCase):\n value = {\n 'url': 'https://blog.cloudflare.com/rss/',\n 'title': 'CloudFlare',\n 'description': 'CloudFlare',\n 'feed_updated': datetime(2014, 10, 3, 6, 0, 0, tzinfo=pytz.utc)\n }\n\n def test_update_feed_successful(self):\n val = dict(self.value)\n feed = Feed.objects.create(**val)\n val.update({'title': 'CloudFlare Blog'}) # update the title\n update_feed(val['url'], val)\n assert Feed.objects.get(url=val['url']).title == 'CloudFlare Blog'\n\n def test_update_feed_not_exist(self):\n update_feed(self.value['url'], self.value)\n\n def test_update_feed_invalid_form(self):\n val = dict(self.value)\n feed = Feed.objects.create(**val)\n val.update({'title': 'abcdefghijklmnopqrstuvwxyz' * 10})\n update_feed(val['url'], val)\n assert Feed.objects.get(url=val['url']).title == 'CloudFlare'\n\n def test_update_feed_form_clean(self):\n val = dict(self.value)\n val.update({'description': 'abcdefghijklmnopqrstuvwxyz' * 5})\n feed = Feed.objects.create(**val)\n val.update({'description': 'abcdefghijklmnopqrstuvwxyz' * 20})\n update_feed(val['url'], val)\n assert len(Feed.objects.get(url=val['url']).description) == 200\n\n\nclass TestAddNewFeed(TestCase):\n value = {\n 'url': 'https://blog.cloudflare.com/rss/',\n 'title': 'CloudFlare',\n 'description': 'CloudFlare',\n 'feed_updated': str(datetime(2014, 10, 2, 18, 17, 28))\n }\n\n def test_get_feed_info_successful(self):\n resp = Mock()\n resp.status_code = 200\n resp.text = \"\"\"\n\n\n\n\n\n\n\nhttp://blog.cloudflare.com/\nGhost 0.6\nFri, 3 Oct 2014 01:17:28 GMT\n\n60\n- \n\n\n\n\nProxying around 5% of the Internet’s requests gives us an interesting vantage point from which to observe malicious behavior. It also make us a target. Aside from the many, varied denial of service attacks that break against our defenses we also see huge number of phishing campaigns. In this
\n]]>\n\n\nhttp://blog.cloudflare.com/of-phishing-attacks-and-wordpress-0days/\n\n685f03c1-34a2-4e55-8b19-877c0211615a\n\n\n\nFri, 3 Oct 2014 01:17:28 GMT\n\n\n\n \"\"\"\n with patch.object(requests, 'get', return_value=resp):\n data, errors = get_feed_info(self.value['url'])\n assert data == self.value\n assert errors == []\n\n def test_get_feed_info_not_supported_err(self):\n resp = Mock()\n resp.status_code = 200\n resp.text = 'test pagemain content'\n with patch.object(requests, 'get', return_value=resp):\n data, errors = get_feed_info(self.value['url'])\n assert data == {}\n assert errors == ['Failed to get this new feed: Unsupported feed format.']\n\n def test_get_feed_info_404_err(self):\n resp = Mock()\n resp.status_code = 404\n with patch.object(requests, 'get', return_value=resp):\n data, errors = get_feed_info(self.value['url'])\n assert data == {}\n assert errors == ['Failed to get this new feed.']\n\n def test_get_feed_info_timeout_err(self):\n with patch.object(requests, 'get', side_effect=Timeout):\n data, errors = get_feed_info(self.value['url'])\n assert data == {}\n assert errors == ['Failed to add this new feed: Timeout. Please try again.']\n\n def test_get_feed_info_connection_err(self):\n with patch.object(requests, 'get', side_effect=ConnectionError):\n data, errors = get_feed_info(self.value['url'])\n assert data == {}\n assert errors == ['Failed to get this new feed.']\n\n\n", "sub_path": "tests/test_feeds/test_tasks.py", "file_name": "test_tasks.py", "file_ext": "py", "file_size_in_byte": 4804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.test.TestCase", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 25, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed.objects.create", "line_number": 30, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 30, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 32, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 33, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 36, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.create", "line_number": 40, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 40, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 42, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 43, "usage_type": "name"}, {"api_name": "feeds.models.Feed.objects.create", "line_number": 48, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 48, "usage_type": "name"}, {"api_name": "feeds.tasks.update_feed", "line_number": 50, "usage_type": "call"}, {"api_name": "feeds.models.Feed.objects.get", "line_number": 51, 
"usage_type": "call"}, {"api_name": "feeds.models.Feed.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "feeds.models.Feed", "line_number": 51, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 63, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 99, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 99, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 100, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 105, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 108, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 108, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 109, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 114, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 116, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 116, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 117, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 122, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 122, "usage_type": "name"}, {"api_name": "requests.Timeout", "line_number": 122, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 123, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 128, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 128, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 128, "usage_type": "name"}, {"api_name": "feeds.tasks.get_feed_info", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "630406564", "text": "# -*- coding: utf-8 -*-\n# pylint: disable=import-error, no-name-in-module, no-member\n\"\"\"\nModule for testing a tfrecord loading module.\n\n\"\"\"\n\nimport os\nimport sys\nfrom kmlm.base.common import ExitCode, Logger, ParseOption\nfrom kmlm.base.tfrecord_gen import TFRecordGenerator\nfrom kmlm.base.utils import KmlmUtil as Util\n\ndef main():\n logger = Logger(name=\"TFRecord gen test for Keras\", level=Logger.INFO).logger\n config = ParseOption(sys.argv, logger).args\n\n # Setting paths\n text_list = Util.get_file_path(config.paths_data_path,\n config.paths_text_corpus)\n tfrecord_path = \"%s.tfrecord\"%text_list\n\n # Loading vocabularies\n vocab_path = Util.get_file_path(config.paths_data_path, config.paths_vocab)\n if not os.path.isfile(vocab_path):\n logger.critical(\"%s does not exist.\", vocab_path)\n sys.exit(ExitCode.INVALID_FILE_PATH)\n vocab, _ = Util.load_vocab(vocab_path, config=config)\n\n import keras.backend as k\n batch, init = TFRecordGenerator.load_tfrecord(tfrecord_path,\n config.train_batch)\n k.get_session().run(init)\n for i, value in \\\n enumerate(TFRecordGenerator.text_tfrecord_generator(batch,\n config,\n vocab)):\n if i >= 2:\n break\n logger.info(value)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "kmlm/base/tfrecord_gen_keras_test.py", "file_name": "tfrecord_gen_keras_test.py", "file_ext": "py", "file_size_in_byte": 1432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "kmlm.base.common.Logger", "line_number": 15, "usage_type": "call"}, {"api_name": "kmlm.base.common.Logger.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "kmlm.base.common.ParseOption", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "kmlm.base.utils.KmlmUtil.get_file_path", "line_number": 19, "usage_type": "call"}, {"api_name": "kmlm.base.utils.KmlmUtil", "line_number": 19, "usage_type": "name"}, {"api_name": "kmlm.base.utils.KmlmUtil.get_file_path", "line_number": 24, "usage_type": "call"}, {"api_name": "kmlm.base.utils.KmlmUtil", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 27, "usage_type": "call"}, {"api_name": "kmlm.base.common.ExitCode.INVALID_FILE_PATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "kmlm.base.common.ExitCode", "line_number": 27, "usage_type": "name"}, {"api_name": "kmlm.base.utils.KmlmUtil.load_vocab", "line_number": 28, "usage_type": "call"}, {"api_name": "kmlm.base.utils.KmlmUtil", "line_number": 28, "usage_type": "name"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator.load_tfrecord", "line_number": 31, "usage_type": "call"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.backend.get_session", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 33, "usage_type": "name"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator.text_tfrecord_generator", "line_number": 35, "usage_type": "call"}, {"api_name": "kmlm.base.tfrecord_gen.TFRecordGenerator", "line_number": 35, "usage_type": "name"}]}
+{"seq_id": "516290640", "text": "import os\nimport json\nfrom google.cloud import bigquery\nfrom google.cloud import error_reporting\nfrom google.api_core import retry\nfrom google.cloud import firestore\nfrom xml.etree import ElementTree\nimport traceback\nimport logging\nimport requests\nimport datetime\nimport pytz\nimport pandas as pd\nimport dateutil\n\nPROJECT_ID = os.getenv(\"GCP_PROJECT\")\nBQ_DATASET = 'vta_vs'\nBQ_TABLE = 'weather_forecast'\nBQ = bigquery.Client()\nDB = firestore.Client()\nclient = error_reporting.Client()\n\n\ndef weather(request):\n \"\"\"\n Responds to a request from Cloud Scheduler. When invoked, gets the weather forecast\n for the (constant) list of lat/long combinations and stores the result in a BigQuery table.\n :param request:\n :return: None\n \"\"\"\n\n # get the forecast\n lat_lon_str_escaped = os.getenv(\"LAT_LON_STR\")\n forecast_url = (\n \"\"\"https://graphical.weather.gov/xml/sample_products/browser_interface/ndfdXMLclient.php?\"\"\"\n \"\"\"whichClient=NDFDgenLatLonList\"\"\"\n \"\"\"&listLatLon={}\"\"\"\n \"\"\"&product=time-series\"\"\"\n \"\"\"&Unit=m\"\"\"\n \"\"\"&temp=temp\"\"\"\n \"\"\"&pop12=pop12\"\"\"\n \"\"\"&Submit=Submit\"\"\").format(lat_lon_str_escaped)\n response = requests.get(forecast_url)\n if response.status_code == 200:\n logging.info(\"Downloaded forecast.\")\n response_xml = ElementTree.fromstring(response.content)\n forecast_time = response_xml.find('head').find('product').find('creation-date').text\n else:\n logging.error(\"Non-success return code from NDFD request\")\n raise RuntimeError('Non-success return code from NDFD request')\n\n # see if we have already seen this record\n logging.info(\"Checking for duplicates.\")\n db_ref = DB.document(u'weather_forecasts/%s' % forecast_time)\n if _was_already_ingested(db_ref):\n logging.warning('Duplication attempt streaming file \\'%s\\'' % db_ref.id)\n return\n else:\n try:\n logging.info(\"Inserting into BigQuery.\")\n _insert_into_bigquery(response_xml, forecast_time)\n _handle_success(db_ref)\n except Exception:\n logging.error(\"Could not insert into BigQuery\")\n _handle_error(db_ref)\n\n\ndef _was_already_ingested(db_ref):\n status = db_ref.get()\n return status.exists and status.to_dict()['success']\n\n\ndef _now():\n return datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')\n\n\ndef _handle_success(db_ref):\n message = 'Forecast \\'%s\\' streamed into BigQuery' % db_ref.id\n doc = {\n u'success': True,\n u'when': _now()\n }\n db_ref.set(doc)\n logging.info(message)\n\n\ndef _handle_error(db_ref):\n message = 'Error streaming forecast \\'%s\\'. 
Cause: %s' % (db_ref.id, traceback.format_exc())\n doc = {\n u'success': False,\n u'error_message': message,\n u'when': _now()\n }\n db_ref.set(doc)\n logging.error(message)\n\n\ndef _insert_into_bigquery(weather_xml, forecast_time):\n\n tree = weather_xml.find('data')\n time_layouts_df = pd.DataFrame()\n logging.info(\"Processing time\")\n for time_layout in tree.findall('time-layout'):\n time_layouts = []\n time_layout_key = time_layout.find('layout-key').text\n for index, start_time in enumerate(time_layout.findall('start-valid-time')):\n time_layouts.append({'time_layout': time_layout_key,\n 'time_index': index,\n 'time': dateutil.parser.parse(start_time.text)})\n time_layouts_df = pd.concat([time_layouts_df, pd.DataFrame(time_layouts)])\n\n logging.info(\"Processing parameters\")\n parameters_df = pd.DataFrame()\n for parameter in tree.findall('parameters'):\n point_name = parameter.attrib['applicable-location']\n for observation in parameter:\n observations = []\n units = observation.attrib['units']\n time_layout = observation.attrib['time-layout']\n observation_name = \"{} ({})\".format(observation.find('name').text, units)\n for time_index, value in enumerate(observation.findall('value')):\n observations.append({\"point_name\": point_name,\n \"time_layout\": time_layout,\n \"time_index\": time_index,\n observation_name: value.text\n })\n observation_df = pd.DataFrame(observations)\n observation_df = observation_df.merge(time_layouts_df)\n observation_df.drop([\"time_layout\", \"time_index\"], axis=1, inplace=True)\n observation_df = observation_df.set_index(\"time\").resample(\"H\").first().ffill()\n parameters_df = pd.concat([parameters_df, observation_df])\n parameters_df = parameters_df.groupby(['point_name', 'time']).last().reset_index().dropna()\n parameters_df['time'] = parameters_df.time.apply(lambda x: x.astimezone('UTC'))\n parameters_df['forecast_time'] = forecast_time\n parameters_df['temperature_c'] = parameters_df['Temperature (Celsius)']\n parameters_df['pop12'] = parameters_df['12 Hourly Probability of Precipitation (percent)']\n\n logging.info(\"Converting rows to json\")\n rows = json.loads(parameters_df[[\n 'point_name',\n 'time',\n 'forecast_time',\n 'temperature_c',\n 'pop12'\n ]].to_json(orient='records'))\n row_ids = [forecast_time]\n table = BQ.dataset(BQ_DATASET).table(BQ_TABLE)\n logging.info(\"Starting insert into BigQuery\")\n errors = BQ.insert_rows_json(table,\n json_rows=rows,\n row_ids=row_ids,\n retry=retry.Retry(deadline=30))\n if errors:\n logging.error(errors)\n raise BigQueryError(errors)\n\n\nclass BigQueryError(Exception):\n '''Exception raised whenever a BigQuery error happened'''\n\n def __init__(self, errors):\n super().__init__(self._format(errors))\n self.errors = errors\n\n def _format(self, errors):\n err = []\n for error in errors:\n err.extend(error['errors'])\n return json.dumps(err)\n", "sub_path": "functions/weather/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 19, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 19, "usage_type": "name"}, {"api_name": "google.cloud.firestore.Client", "line_number": 20, "usage_type": "call"}, {"api_name": "google.cloud.firestore", "line_number": 20, "usage_type": "name"}, {"api_name": 
"google.cloud.error_reporting.Client", "line_number": 21, "usage_type": "call"}, {"api_name": "google.cloud.error_reporting", "line_number": 21, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 46, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.utcnow", "line_number": 74, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 74, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 84, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 102, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 109, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 138, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 139, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 148, "usage_type": "call"}, {"api_name": "google.api_core.retry.Retry", "line_number": 152, "usage_type": "call"}, {"api_name": "google.api_core.retry", "line_number": 152, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 154, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}]}
+{"seq_id": "611938727", "text": "import os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\n\ndef main():\n matplotlib.rcParams['text.usetex'] = True\n sns.set(font_scale=4, style=\"whitegrid\")\n base_dir = \"results/resultsRHtest7_12_18_c/rh_add5_repeat10\"\n num_req_field = \"Number of Concurrent Requests\"\n times_field = \"Response Time (s)\"\n title = \"Resource Registration\"\n base_filename = \"rh\"\n file_format = \"eps\"\n show_outliers = False\n ymax = 5\n ytick = 1\n reqs = []\n times = []\n\n files = os.listdir(base_dir)\n\n for f_name in files:\n try:\n num_req = int(f_name.split('_')[1])\n except IndexError:\n num_req = -1\n\n if 0 <= num_req <= 100:\n if num_req % 5 != 0:\n continue\n with open(base_dir + \"/\" + f_name, 'rb') as f:\n for line in f.readlines()[:num_req]:\n reqs.append(num_req)\n times.append(int(line.split()[2]) / 1000.0)\n\n data_frame = pd.DataFrame({num_req_field: reqs, times_field: times})\n response_times_boxplot = pd.melt(data_frame, id_vars=num_req_field, value_name=times_field)\n\n font = {\n 'family': 'Liberation Sans',\n 'weight': 'normal'\n }\n\n plt.rc('font', **font)\n plt.yticks(np.arange(0, ymax + 1, ytick))\n # plt.xlabel(\"x label\")\n # plt.ylabel(\"y label\")\n\n plt.title(title)\n plt.ylim(ymax=ymax)\n # plt.legend(['True Positive Ratio'], loc='lower right')\n # plt.legend(loc='upper right', prop={'size': 40})\n sns.boxplot(x=num_req_field, y=times_field, data=response_times_boxplot, showfliers=show_outliers, notch=True)\n # plt.grid(axis='y')\n # plt.grid(axis='x')\n fig = plt.gcf()\n # fig.tight_layout(pad=0.7 * 22 / font_size)\n # fig.tight_layout()\n fig.set_size_inches(20, 14)\n # plt.show()\n plt.savefig(file_format + \"/\" + base_filename + \".\" + file_format)\n #\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "rh_boxplots.py", "file_name": "rh_boxplots.py", "file_ext": "py", "file_size_in_byte": 2001, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "seaborn.set", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.melt", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}]}
+{"seq_id": "197824205", "text": "import psycopg2\nimport psycopg2.extras\nimport logging\nfrom pathlib import Path\nfrom configparser import ConfigParser\nimport logging\nimport os.path\nimport xml.etree.ElementTree as ET\n\n\n\n\ndef load_config(config_file=\"database.ini\"):\n \"\"\"\n Load the configuration file\n :param config_file:\n :return:\n \"\"\"\n db = {}\n parser = ConfigParser()\n parser.read( config_file )\n params = parser.items(\"POSTGRES\")\n for param in params:\n db[param[0]] = param[1]\n return db\n\n\ndef get_connection_local_pg(params):\n \"\"\"\n Return the Postgres db Connection\n\n :param db:\n :param config:\n :return:\n \"\"\"\n #conn_string_no_passwd = params\n\n #logging.info(conn_string_no_passwd)\n conn = psycopg2.connect(**params)\n\n cur = conn.cursor()\n logging.info('PostgreSQL database version:')\n cur.execute('SELECT version()')\n db_version = cur.fetchone()\n logging.info(db_version)\n # print(cursor)\n # conn.cursor will return a cursor object, you can use this cursor to perform queries\n\n return conn\n\n\ndef load_annotations(conn,pat_note_id, name, gold_path):\n cur = conn.cursor()\n file_path = gold_path + \"/\" + name.replace(\"txt\", \"xml\")\n if os.path.isfile(file_path):\n f = open(file_path)\n logging.info(f\"Annotation file found for {file_path}\")\n tree = ET.parse(file_path)\n root = tree.getroot()\n for tags in root.findall('TAGS'):\n logging.info(f\"TAGS {tags}\" )\n for tag in tags.iter():\n if len( tag.keys() ) > 0 :\n logging.info(f\"TAG { tag.tag } : { tag.attrib }\" )\n insert_sql = 'INSERT INTO pat_annotations ( pat_note_id, category, type, pos_id, start, stop, text) VALUES ( %s,%s, %s, %s, %s, %s, %s) RETURNING id'\n keys = tag.attrib.keys();\n logging.info(f\" KEYS for tag : {keys}\")\n # os.sys.exit(1)\n cur.execute(insert_sql, (pat_note_id, tag.attrib[\"TYPE\"], tag.tag,tag.attrib['id'], tag.attrib['start'],tag.attrib['end'] ,tag.attrib['text']))\n conn.commit()\n else:\n logging.error(f\"Annotation file not found for {file_path}\")\n os.sys.exit(1)\n\ndef import_data(conn, path, gold_path):\n cur = conn.cursor()\n create_sql_pat_notes = 'CREATE TABLE IF NOT EXISTS \"i2b2_data\".\"public\".pat_notes ( id SERIAL NOT NULL, file_name VARCHAR, note VARCHAR, PRIMARY KEY (id) )'\n create_anno = 'CREATE TABLE IF NOT EXISTS \"i2b2_data\".\"public\".pat_annotations ( id SERIAL NOT NULL, pat_note_id INTEGER, category CHARACTER VARYING, type CHARACTER VARYING, pos_id CHARACTER VARYING, START NUMERIC, STOP NUMERIC, TEXT CHARACTER VARYING, CONSTRAINT patannotations_fk1 FOREIGN KEY (pat_note_id) REFERENCES \"i2b2_data\".\"public\".\"pat_notes\" (\"id\") )'\n res = cur.execute(create_sql_pat_notes)\n res = cur.execute(create_anno)\n conn.commit()\n\n # Truncate table\n truncate_sql = 'TRUNCATE TABLE \"i2b2_data\".\"public\".pat_notes CASCADE'\n truncate_sql_ann = 'TRUNCATE TABLE \"i2b2_data\".\"public\".pat_annotations CASCADE '\n res = cur.execute(truncate_sql_ann)\n res = cur.execute(truncate_sql)\n # Read files and import\n with os.scandir(path) as entries:\n for entry in entries:\n if entry.is_file():\n logging.info(f\"Importing file {entry.name}\")\n with open(entry, 'r') as f:\n data = f.read()\n insert_sql = 'insert into \"i2b2_data\".\"public\".pat_notes(file_name, note) values (%s, %s) RETURNING id'\n cur.execute(insert_sql, (entry.name,data,))\n row_id = cur.fetchone()[0]\n logging.info(f\"Inserted row {row_id} \")\n load_annotations(conn,row_id , entry.name, gold_path )\n conn.commit()\n\n cur.close()\n conn.commit()\n return 
None", "sub_path": "db_connection.py", "file_name": "db_connection.py", "file_ext": "py", "file_size_in_byte": 3926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 55, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 57, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 58, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 58, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 67, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.sys.exit", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.sys", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 73, "usage_type": "name"}, {"api_name": "os.path.scandir", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}]}
+{"seq_id": "291362660", "text": "import logging\nimport os\nimport shutil\nimport subprocess\n\nfrom middlewared.job import Pipes\nfrom middlewared.service import CallError, item_method, job, Service\nfrom middlewared.schema import accepts, Dict, Int, Str\nfrom middlewared.utils import osc, run\nfrom middlewared.utils.shell import join_commandline\n\n\nlogger = logging.getLogger(__name__)\n\n# platform specific imports\nif osc.IS_FREEBSD:\n import sysctl\n\n\nclass PoolService(Service):\n\n @item_method\n @accepts(\n Int('id'),\n Dict(\n 'options',\n Dict(\n 'geli',\n Str('passphrase', private=True, default=''),\n ),\n )\n )\n @job(lock='pool_expand')\n async def expand(self, job, id, options):\n \"\"\"\n Expand pool to fit all available disk space.\n \"\"\"\n pool = await self.middleware.call('pool.get_instance', id)\n if osc.IS_LINUX:\n if options.get('passphrase'):\n raise CallError('Passphrase should not be supplied for this platform.')\n # FIXME: We have issues in ZoL where when pool is created with partition uuids, we are unable\n # to expand pool where all pool related options error out saying I/O error\n # https://github.com/zfsonlinux/zfs/issues/9830\n raise CallError('Expand is not supported on this platform yet because of underlying ZFS issues.')\n else:\n if pool['encrypt']:\n if not pool['is_decrypted']:\n raise CallError('You can only expand decrypted pool')\n\n for error in (\n await self.middleware.call('pool.pool_lock_pre_check', pool, options['geli']['passphrase'])\n ).errors:\n raise CallError(error.errmsg)\n\n all_partitions = {p['name']: p for p in await self.middleware.call('disk.list_all_partitions')}\n\n try:\n if osc.IS_FREEBSD:\n sysctl.filter('kern.geom.debugflags')[0].value = 16\n geli_resize = []\n try:\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK':\n logger.debug('Not expanding vdev of type %r', vdev['type'])\n continue\n\n if vdev['status'] != 'ONLINE':\n logger.debug('Not expanding vdev that is %r', vdev['status'])\n continue\n\n part_data = all_partitions.get(vdev['device'])\n if not part_data:\n logger.debug('Unable to find partition data for %s', vdev['device'])\n\n partition_number = part_data['partition_number']\n if not partition_number:\n logger.debug('Could not parse partition number from %r', vdev['device'])\n continue\n\n assert part_data['disk'] == vdev['disk']\n\n if osc.IS_LINUX:\n await run(\n 'sgdisk', '-d', str(partition_number), '-n', f'{partition_number}:0:0',\n '-c', '2:', '-u', f'{partition_number}:{part_data[\"partition_uuid\"]}',\n '-t', f'{partition_number}:BF01', part_data['path']\n )\n await run('partprobe', os.path.join('/dev', part_data['disk']))\n else:\n await run('camcontrol', 'reprobe', vdev['disk'])\n await run('gpart', 'recover', vdev['disk'])\n await run('gpart', 'resize', '-i', str(partition_number), vdev['disk'])\n\n if osc.IS_FREEBSD and pool['encrypt']:\n geli_resize_cmd = (\n 'geli', 'resize', '-s', str(part_data['size']), vdev['device']\n )\n rollback_cmd = (\n 'gpart', 'resize', '-i', str(partition_number), '-s', str(part_data['size']), vdev['disk']\n )\n\n logger.warning('It will be obligatory to notify GELI that the provider has been resized: %r',\n join_commandline(geli_resize_cmd))\n logger.warning('Or to resize provider back: %r',\n join_commandline(rollback_cmd))\n geli_resize.append((geli_resize_cmd, rollback_cmd))\n finally:\n if osc.IS_FREEBSD and geli_resize:\n await self.__geli_resize(pool, geli_resize, options)\n finally:\n if osc.IS_FREEBSD:\n 
sysctl.filter('kern.geom.debugflags')[0].value = 0\n\n for vdev in sum(pool['topology'].values(), []):\n if vdev['type'] != 'DISK' or vdev['status'] != 'ONLINE':\n continue\n\n await self.middleware.call('zfs.pool.online', pool['name'], vdev['guid'], True)\n\n async def __geli_resize(self, pool, geli_resize, options):\n failed_rollback = []\n\n lock_job = await self.middleware.call('pool.lock', pool['id'], options['geli']['passphrase'])\n await lock_job.wait()\n if lock_job.error:\n logger.warning('Error locking pool: %s', lock_job.error)\n\n for geli_resize_cmd, rollback_cmd in geli_resize:\n if not await self.__run_rollback_cmd(rollback_cmd):\n failed_rollback.append(rollback_cmd)\n\n if failed_rollback:\n raise CallError(\n 'Locking your encrypted pool failed and rolling back changes failed too. '\n f'You\\'ll need to run the following commands manually:\\n%s' % '\\n'.join(\n map(join_commandline, failed_rollback)\n )\n )\n else:\n for geli_resize_cmd, rollback_cmd in geli_resize:\n try:\n await run(*geli_resize_cmd, encoding='utf-8', errors='ignore')\n except subprocess.CalledProcessError as geli_resize_error:\n if geli_resize_error.stderr.strip() == 'geli: Size hasn\\'t changed.':\n logger.info(\n '%s: %s', join_commandline(geli_resize_cmd), geli_resize_error.stderr.strip()\n )\n else:\n logger.error(\n '%r failed: %s. Resizing partition back', join_commandline(geli_resize_cmd),\n geli_resize_error.stderr.strip()\n )\n if not await self.__run_rollback_cmd(rollback_cmd):\n failed_rollback.append(rollback_cmd)\n\n if failed_rollback:\n raise CallError(\n 'Resizing partitions of your encrypted pool failed and rolling back '\n 'changes failed too. You\\'ll need to run the following commands manually:\\n%s' %\n '\\n'.join(map(join_commandline, failed_rollback))\n )\n\n if options['geli']['passphrase']:\n unlock_job = await self.middleware.call(\n 'pool.unlock', pool['id'], {'passphrase': options['geli']['passphrase']}\n )\n else:\n unlock_job = await self.middleware.call(\n 'pool.unlock', pool['id'], {'recoverykey': True},\n pipes=Pipes(input=self.middleware.pipe())\n )\n\n def copy():\n with open(pool['encryptkey_path'], 'rb') as f:\n shutil.copyfileobj(f, unlock_job.pipes.input.w)\n\n try:\n await self.middleware.run_in_thread(copy)\n finally:\n await self.middleware.run_in_thread(unlock_job.pipes.input.w.close)\n\n await unlock_job.wait()\n if unlock_job.error:\n raise CallError(unlock_job.error)\n\n @staticmethod\n async def __run_rollback_cmd(rollback_cmd):\n try:\n await run(*rollback_cmd, encoding='utf-8', errors='ignore')\n except subprocess.CalledProcessError as rollback_error:\n logger.critical(\n '%r failed: %s. 
To restore your pool functionality you will have to run this command manually.',\n join_commandline(rollback_cmd),\n rollback_error.stderr.strip()\n )\n return False\n else:\n return True\n", "sub_path": "src/middlewared/middlewared/plugins/pool_/expand.py", "file_name": "expand.py", "file_ext": "py", "file_size_in_byte": 8588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 16, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 16, "usage_type": "name"}, {"api_name": "middlewared.service.Service", "line_number": 20, "usage_type": "name"}, {"api_name": "middlewared.utils.osc.IS_LINUX", "line_number": 39, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 39, "usage_type": "name"}, {"api_name": "middlewared.service.CallError", "line_number": 41, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 45, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 49, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 54, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 59, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 59, "usage_type": "name"}, {"api_name": "sysctl.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_LINUX", "line_number": 83, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 83, "usage_type": "name"}, {"api_name": "middlewared.utils.run", "line_number": 84, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "middlewared.utils.run", "line_number": 91, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 92, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 93, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 95, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 95, "usage_type": "name"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 104, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 106, "usage_type": "call"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 109, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 109, "usage_type": "name"}, {"api_name": "middlewared.utils.osc.IS_FREEBSD", "line_number": 112, "usage_type": "attribute"}, {"api_name": "middlewared.utils.osc", "line_number": 112, "usage_type": "name"}, {"api_name": "sysctl.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "middlewared.service.item_method", "line_number": 22, "usage_type": "name"}, {"api_name": "middlewared.schema.accepts", "line_number": 23, "usage_type": "call"}, {"api_name": "middlewared.schema.Int", "line_number": 24, "usage_type": "call"}, {"api_name": "middlewared.schema.Dict", "line_number": 25, "usage_type": "call"}, {"api_name": "middlewared.schema.Dict", "line_number": 27, "usage_type": "call"}, {"api_name": "middlewared.schema.Str", "line_number": 29, 
"usage_type": "call"}, {"api_name": "middlewared.service.job", "line_number": 33, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 134, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 137, "usage_type": "argument"}, {"api_name": "middlewared.utils.run", "line_number": 143, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 144, "usage_type": "attribute"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 147, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 151, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 158, "usage_type": "call"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 161, "usage_type": "argument"}, {"api_name": "middlewared.job.Pipes", "line_number": 171, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 176, "usage_type": "call"}, {"api_name": "middlewared.service.CallError", "line_number": 185, "usage_type": "call"}, {"api_name": "middlewared.utils.run", "line_number": 190, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 191, "usage_type": "attribute"}, {"api_name": "middlewared.utils.shell.join_commandline", "line_number": 194, "usage_type": "call"}]}
+{"seq_id": "522808363", "text": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import Blueprint, redirect, render_template, request, url_for\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport json\nimport datetime\n\n\ncrud = Blueprint('crud', __name__)\n\ncred = credentials.Certificate('eu1-kubernetes-169431998c5e.json')\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n\n# [START list]\n@crud.route(\"/\")\ndef list():\n print(\"Show books\")\n users_ref = db.collection(u'users')\n docs = users_ref.get()\n\n for doc in docs:\n print(u'{} => {}'.format(doc.id, doc.to_dict()))\n return render_template(\n \"list.html\")\n# [END list]\n\n\n\n# [START add]\n'''\ncurl -v -XPOST http://localhost:8080/books/saldo --header \"Content-Type: application/json\" --data '{\"airport\":\"arlanda\",\"saldo\":\"5\"}'\n\n'''\n\n@crud.route('/saldo', methods=['POST'])\ndef add():\n if request.method == 'POST':\n print('Adding saldo to db')\n content = request.get_json(silent=True)\n airport=\"non\"\n for key in content:\n if key == \"airport\":\n airport=content[key]\n print(content[key])\n content['timestamp']=datetime.datetime.now()\n doc_ref = db.collection(u'saldo').document(airport+\"_\"+str(datetime.datetime.now()))\n doc_ref.set(content\n )\n\n return render_template(\"form.html\")\n# [END add]\n# [START add]\n'''\n\ncurl -v -XPOST http://localhost:8080/books/action --header \"Content-Type: application/json\" --data '{\"airport\":\"arlanda\",\"run\":\"name\",\"data\":{\"temp\":\"34\",\"brushlenght\":\"20\",\"power\":\"300\"}}'\n\n'''\n\n\n\n@crud.route('/action', methods=['POST'])\ndef action():\n if request.method == 'POST':\n print('Action log')\n content = request.get_json(silent=True)\n airport=\"non\"\n for key in content:\n if key == \"airport\":\n airport=content[key]\n print(content[key])\n content['timestamp']=datetime.datetime.now()\n doc_ref = db.collection(u'action').document(airport+\"_\"+str(datetime.datetime.now()))\n doc_ref.set(content\n )\n\n return render_template(\"form.html\")\n\n@crud.route('//edit', methods=['GET', 'POST'])\ndef edit(id):\n book = get_model().read(id)\n\n if request.method == 'POST':\n data = request.form.to_dict(flat=True)\n\n book = get_model().update(data, id)\n\n return redirect(url_for('.view', id=book['id']))\n\n return render_template(\"form.html\", action=\"Edit\", book=book)\n\n\n\n", "sub_path": "api/backend/crud.py", "file_name": "crud.py", "file_ext": "py", "file_size_in_byte": 3059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Blueprint", "line_number": 23, "usage_type": "call"}, {"api_name": "firebase_admin.credentials.Certificate", "line_number": 25, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 25, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 26, "usage_type": "call"}, 
{"api_name": "firebase_admin.firestore.client", "line_number": 27, "usage_type": "call"}, {"api_name": "firebase_admin.firestore", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}]}
+{"seq_id": "59388924", "text": "import pandas as pd\nimport pylab as pl\nimport numpy as np\nimport scipy.optimize as opt\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\nimport matplotlib.pyplot as plt\n\n#Read in Cancer data files\ncsv_path = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/cell_samples.csv'\ncell_df = pd.read_csv(csv_path)\n\n#plot Clump vs UnifSize with the dependent variable being 2(benign) or 4(maligant)\nax = cell_df[cell_df['Class'] == 4][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='DarkBlue', label='malignant');\ncell_df[cell_df['Class'] == 2][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='Yellow', label='benign', ax=ax);\nplt.savefig('SVM.png')\n\n#----------preprocessing----------------------------------------------\n\n#list out all atrributes\n\n\n#Drop nonnumerical rows in BareNuc attribute then convert remaining to int\ncell_df = cell_df[pd.to_numeric(cell_df['BareNuc'], errors='coerce').notnull()]\ncell_df['BareNuc'] = cell_df['BareNuc'].astype('int')\n\n\n#create another df just of independent variables, hence the double square brakets\nfeature_df = cell_df[['Clump', 'UnifSize', 'UnifShape', 'MargAdh', 'SingEpiSize', 'BareNuc', 'BlandChrom', 'NormNucl', 'Mit']]\nX = np.asarray(feature_df) #convert to nparray\nprint(X[0:5])\n\n#nparray of dependent variable\ncell_df['Class'] = cell_df['Class'].astype('int')\ny = np.asarray(cell_df['Class'])\nprint(y[0:5])\n\n#Train, test\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)\nprint ('Train set:', X_train.shape, y_train.shape)\nprint ('Test set:', X_test.shape, y_test.shape)\n\n\n#------------------Modeling----------------------------------------------------\n\nfrom sklearn import svm\n\n#Use default equation to create our seperator\nclf = svm.SVC(kernel='rbf')\nclf.fit(X_train, y_train)\n\n#predict new values\nyhat = clf.predict(X_test)\nprint(yhat[0:5])\n\n\n#------------------Evaluation--------------------------------------------------\n\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport itertools\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('CF.png')\n\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, yhat, labels=[2,4])\nnp.set_printoptions(precision=2)\n\nprint (classification_report(y_test, yhat))\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion 
matrix')\n\n\n\n#Plotting the F1-score\nfrom sklearn.metrics import f1_score\nf1_score(y_test, yhat, average='weighted')\n\n\n#Using jaccard to measure accuracy\nfrom sklearn.metrics import jaccard_similarity_score\njaccard_similarity_score(y_test, yhat)\n", "sub_path": "SVM.py", "file_name": "SVM.py", "file_ext": "py", "file_size_in_byte": 3835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "pandas.to_numeric", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 72, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, 
"usage_type": "name"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.metrics.jaccard_similarity_score", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "582934993", "text": "\nfrom tkinter import *\nfrom tkinter import ttk, colorchooser, filedialog, messagebox\nimport tkinter.messagebox\nimport PIL.ImageGrab as ImageGrab\n\nclass main:\n\n def __init__(self, master):\n self.master = master\n self.penwidth = 5\n self.color_bg = 'white'\n self.color_fg = 'black'\n self.drawwidgets()\n self.setup()\n self.c.bind('', self.paint) # drwaing the line\n self.c.bind('', self.reset)\n\n def changeW(self, e):\n self.penwidth = e\n\n def clear(self):\n self.c.delete(ALL)\n\n def paint(self, e):\n paint_color = self.color_bg if self.eraser_on else self.color_fg\n if self.old_x and self.old_y:\n self.c.create_line(self.old_x, self.old_y, e.x, e.y,\n width=self.penwidth, fill=paint_color,\n capstyle=ROUND, smooth=TRUE, splinesteps=36)\n self.old_x = e.x\n self.old_y = e.y\n\n def erase(self):\n self.activate_button(self.eraser, eraser_mode=True)\n\n def penf(self):\n self.activate_button(self.pen)\n\n def reset(self, e): # reseting or cleaning the canvas\n self.old_x = None\n self.old_y = None\n\n def change_fg(self): # changing the pen color\n self.color_fg = colorchooser.askcolor(color=self.color_fg)[1]\n\n def change_bg(self): # changing the background color canvas\n self.color_bg = colorchooser.askcolor(color=self.color_bg)[1]\n self.c['bg'] = self.color_bg\n self.clear()\n\n def activate_button(self, some_button, eraser_mode=False):\n self.active_button.config(relief=RAISED)\n some_button.config(relief=SUNKEN)\n self.active_button = some_button\n self.eraser_on = eraser_mode\n\n def msg(self):\n tkinter.messagebox.showinfo(\n 'About Paint Application', 'This is a paint aplication which provides you with features such as changing background and brush colors. It also provides you with a slider to change pen width.')\n\n def about(self):\n tkinter.messagebox.showinfo(\n \"Paint Application Developer\", \"Kushal Nitin Lahoti MIS :- 111803179\")\n\n def save_it(self):\n\n try:\n filename = filedialog.asksaveasfilename(defaultextension='.jpg')\n ImageGrab.grab().save(filename)\n messagebox.showinfo('Paint says', 'image is saved as ' + str(filename))\n\n except:\n messagebox.showerror('Paint says', 'unable to save image, \\n something went wrong')\n\n def save_it_destroy(self):\n\n try:\n filename = filedialog.asksaveasfilename(defaultextension='.jpg')\n ImageGrab.grab().save(filename)\n messagebox.showinfo('Paint says', 'image is saved as ' + str(filename))\n self.root.destroy()\n\n except:\n messagebox.showerror('Paint says', 'unable to save image, \\n something went wrong')\n\n def drawwidgets(self):\n self.controls = Frame(self.master, height=1000, width=140)\n self.label = Label(self.controls, text='Width',font=('Times 15'), fg='red')\n self.label.place(x=10, y=280)\n self.slider = ttk.Scale(self.controls, from_=5,to=100, command=self.changeW, orient=VERTICAL)\n self.slider.set(self.penwidth)\n self.slider.place(x=80, y=250)\n self.controls.pack(side=LEFT)\n self.pen = Button(self.controls, text='Pen',font=('Times 12'), command=self.penf)\n self.pen.place(x=15, y=200)\n self.eraser = Button(self.controls, text='Eraser',font=('Times 12'), command=self.erase)\n self.eraser.place(x=75, y=200)\n self.c = Canvas(self.master, width=500, height=400, bg=self.color_bg)\n self.c.pack(fill=BOTH, expand=True)\n\n menu = Menu(self.master)\n self.master.config(menu=menu)\n\n filemenu = Menu(menu, tearoff = 0)\n menu.add_cascade(label='File', menu=filemenu)\n filemenu.add_command(label='Save', command=self.save_it)\n filemenu.add_command(label='Save and 
Exit', command=self.save_it_destroy)\n\n color = Menu(menu, tearoff=0)\n menu.add_cascade(label='Colors', menu=color)\n color.add_command(label='Brush Color', command=self.change_fg)\n color.add_command(label='Background Color', command=self.change_bg)\n\n option = Menu(menu, tearoff=0)\n menu.add_cascade(label='Options', menu=option)\n option.add_command(label='Clear Canvas', command=self.clear)\n option.add_command(label='Exit', command=self.master.destroy)\n\n help_option = Menu(menu, tearoff=0)\n menu.add_cascade(label=\"Help\", menu=help_option)\n #help_option.add_command(label=\"Features\", command=self.features_msg)\n help_option.add_command(label=\"About Paint Application\", command=self.msg)\n help_option.add_command(label=\"Develpoers\", command=self.about)\n\n def setup(self):\n\n self.old_x = None\n self.old_y = None\n self.eraser_on = False\n self.active_button = self.pen\n\n\nif __name__ == '__main__':\n root = Tk()\n main(root)\n root.geometry('900x600')\n root.title('Paint Application')\n root.mainloop()\n\n\n", "sub_path": "Assignment 6_PaintApp/Paint_app.py", "file_name": "Paint_app.py", "file_ext": "py", "file_size_in_byte": 5242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tkinter.colorchooser.askcolor", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.colorchooser", "line_number": 45, "usage_type": "name"}, {"api_name": "tkinter.colorchooser.askcolor", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.colorchooser", "line_number": 48, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 70, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 71, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 71, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 74, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 74, "usage_type": "name"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 79, "usage_type": "name"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 80, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 80, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 81, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 85, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 85, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scale", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 91, "usage_type": "name"}]}
+{"seq_id": "533320268", "text": "import netifaces\nimport socket\nfrom smtplib import SMTP_SSL as SMTP\nfrom email.mime.text import MIMEText\n\n\nintInfo = list();\nfrom_addr = \"XXXX@gmail.com\"\nto_addrs = \"XXXX@gmail.com\"\ncontent = \"\"\n\nintList = netifaces.interfaces()\n\nfor intf in intList:\n try:\n addr = netifaces.ifaddresses(intf)\n content += (intf + \"\\n\")\n content += (\" IP address:\\t\"+addr[netifaces.AF_INET][0]['addr']+\"\\n\")\n content += (\"Subnet Mask:\\t\"+addr[netifaces.AF_INET][0]['netmask'] + \"\\n\\n\")\n except KeyError:\n content += (\"No IP address found on \"+intf+\"\\n\\n\")\n \n\nmsg = MIMEText(content, 'plain')\nmsg['Subject'] = \"--Network Information from \"+socket.gethostname()+\"--\"\nmsg['From'] = from_addr\n\nser = SMTP(\"smtp.gmail.com:465\")\nser.ehlo()\nser.login(\"XXXX\", \"XXXX\")\nser.sendmail(from_addr, to_addrs, msg.as_string())\nser.quit()", "sub_path": "mailing/mailing.py", "file_name": "mailing.py", "file_ext": "py", "file_size_in_byte": 851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "netifaces.interfaces", "line_number": 12, "usage_type": "call"}, {"api_name": "netifaces.ifaddresses", "line_number": 16, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 18, "usage_type": "attribute"}, {"api_name": "netifaces.AF_INET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "email.mime.text.MIMEText", "line_number": 24, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 25, "usage_type": "call"}, {"api_name": "smtplib.SMTP_SSL", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "452409097", "text": "import copy\nimport logging\nimport os.path\nimport jinja2\nimport yaml\nimport jsonpatch\nimport json\nfrom collections import OrderedDict\nimport kpm.manifest as manifest\nfrom kpm.template_filters import jinja_filters\nfrom kpm.kub_base import KubBase\nfrom kpm.kubernetes import get_endpoint\nfrom kpm.utils import convert_utf8\n\n\n# __all__ = ['Kub']\n\nlogger = logging.getLogger(__name__)\n\n\n_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n\njinja_env = jinja2.Environment()\njinja_env.filters.update(jinja_filters())\n\n\nclass Kub(KubBase):\n def __init__(self, *args, **kwargs):\n super(Kub, self).__init__(*args, **kwargs)\n self.manifest = manifest.Manifest(self.package)\n\n @property\n def kubClass(self):\n return Kub\n\n def _create_namespaces(self, resources):\n # @TODO create namespaces for all manifests\n if self.namespace:\n ns = self.create_namespace(self.namespace)\n resources[ns['file']] = ns\n return resources\n\n def _append_patch(self, resources={}):\n index = 0\n\n for resource in self.manifest.resources:\n index += 1\n resources[resource['file']] = resource\n resource[\"order\"] = index\n if 'protected' not in resource:\n resource[\"protected\"] = False\n if 'patch' not in resource:\n resource['patch'] = []\n\n if self._deploy_resources is not None:\n for resource in self._deploy_resources:\n if 'patch' in resource and len(resource['patch']) > 0:\n resources[resource['file']][\"patch\"] += resource['patch']\n\n return resources\n\n def _generate_shards(self, resources):\n if not len(self.shards):\n return resources\n sharded = {}\n to_remove = []\n index = 0\n for _, resource in resources.iteritems():\n index += 1\n resource['order'] = index\n if 'sharded' in resource and resource['sharded'] is True:\n for shard in self.shards:\n shard_vars = shard.get('variables', {})\n shard_vars.update({\"name\": shard['name']})\n\n r = {\"file\": \"%s-%s.yaml\" % (os.path.splitext(resource['file'])[0].replace(\"/\", \"_\"),\n shard['name']),\n \"order\": index,\n \"protected\": False,\n \"template\": resource['file'],\n \"variables\": shard_vars,\n \"patch\": resource['patch'] + shard.get('patch', []),\n \"name\": \"%s-%s\" % (resource['name'], shard['name']),\n \"type\": resource['type']}\n sharded[r['file']] = r\n index += 1\n to_remove.append(resource['file'])\n map(resources.pop, to_remove)\n resources.update(sharded)\n return resources\n\n def _default_patch(self, resources):\n for _, resource in resources.iteritems():\n patch = [\n {\"op\": \"replace\",\n \"path\": \"/metadata/name\",\n \"value\": resource['name']},\n ]\n if 'patch' not in resource:\n resource['patch'] = []\n resource['patch'] += patch\n return resources\n\n def _resolve_jinja(self, resources, from_value=False):\n for _, resource in resources.iteritems():\n if 'template' in resource:\n tpl_file = resource['template']\n else:\n tpl_file = resource['file']\n if from_value or resource.get('generated', False) is True:\n val = yaml.safe_dump(convert_utf8(resource['value']), width=float(\"inf\"))\n else:\n val = self.package.files[os.path.join('templates', tpl_file)]\n template = jinja_env.from_string(val)\n variables = copy.deepcopy(self.variables)\n if 'variables' in resource:\n variables.update(resource['variables'])\n if len(self.shards):\n variables['kpmshards'] = self.shards\n t = template.render(variables)\n resource['value'] = yaml.safe_load(t)\n return resources\n\n def _apply_patches(self, resources):\n for _, resource in resources.iteritems():\n if 
self.namespace:\n if 'namespace' in resource['value']['metadata']:\n op = 'replace'\n else:\n op = 'add'\n resource['patch'].append({\"op\": op, \"path\": \"/metadata/namespace\", \"value\": self.namespace})\n\n if len(resource['patch']):\n patch = jsonpatch.JsonPatch(resource['patch'])\n result = patch.apply(resource['value'])\n resource['value'] = result\n return resources\n\n def resources(self):\n if self._resources is None:\n self._resources = OrderedDict()\n resources = self._resources\n resources = self._create_namespaces(resources)\n resources = self._append_patch(resources)\n resources = self._generate_shards(resources)\n resources = self._default_patch(resources)\n resources = self._resolve_jinja(resources)\n resources = self._apply_patches(resources)\n resources = self._resolve_jinja(resources, True)\n return self._resources\n\n def prepare_resources(self, dest=\"/tmp\", index=0):\n for _, resource in self.resources().iteritems():\n index += 1\n path = os.path.join(dest, \"%02d_%s_%s\" % (index,\n self.version,\n resource['file'].replace(\"/\", \"_\")))\n f = open(path, 'w')\n f.write(yaml.safe_dump(convert_utf8(resource['value'])))\n resource['filepath'] = f.name\n f.close()\n return index\n\n def build(self):\n result = []\n for kub in self.dependencies:\n kubresources = OrderedDict([(\"package\", kub.name),\n (\"version\", kub.version),\n (\"namespace\", kub.namespace),\n (\"resources\", [])])\n for _, resource in kub.resources().iteritems():\n resource = self._annotate_resource(kub, resource)\n\n kubresources['resources'].\\\n append(OrderedDict({\"file\": resource['file'],\n \"hash\": resource['value']['metadata']['annotations'].get('kpm.hash', None),\n \"protected\": resource['protected'],\n \"name\": resource['name'],\n \"kind\": resource['value']['kind'].lower(),\n \"endpoint\": get_endpoint(\n resource['value']['kind'].lower()).\n format(namespace=self.namespace),\n \"body\": json.dumps(resource['value'])}))\n\n result.append(kubresources)\n return {\"deploy\": result,\n \"package\": {\"name\": self.name,\n \"version\": self.version}}\n", "sub_path": "kpm/kub.py", "file_name": "kub.py", "file_ext": "py", "file_size_in_byte": 7402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "yaml.resolver", "line_number": 21, "usage_type": "attribute"}, {"api_name": "jinja2.Environment", "line_number": 24, "usage_type": "call"}, {"api_name": "kpm.template_filters.jinja_filters", "line_number": 25, "usage_type": "call"}, {"api_name": "kpm.kub_base.KubBase", "line_number": 28, "usage_type": "name"}, {"api_name": "kpm.manifest.Manifest", "line_number": 31, "usage_type": "call"}, {"api_name": "kpm.manifest", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.path.splitext", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 77, "usage_type": "name"}, {"api_name": "yaml.safe_dump", "line_number": 112, "usage_type": "call"}, {"api_name": "kpm.utils.convert_utf8", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 114, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 116, "usage_type": "call"}, {"api_name": "yaml.safe_load", 
"line_number": 122, "usage_type": "call"}, {"api_name": "jsonpatch.JsonPatch", "line_number": 135, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 156, "usage_type": "name"}, {"api_name": "yaml.safe_dump", "line_number": 160, "usage_type": "call"}, {"api_name": "kpm.utils.convert_utf8", "line_number": 160, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 168, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 176, "usage_type": "call"}, {"api_name": "kpm.kubernetes.get_endpoint", "line_number": 181, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 184, "usage_type": "call"}]}
+{"seq_id": "437875581", "text": "from time import time\nfrom typing import Optional\n\nfrom httpx import Client\n\n\nclass HTTPClient:\n def __init__(\n self,\n base_url: str,\n default_headers: Optional[dict] = None,\n default_params: Optional[dict] = None,\n ):\n self.base_url = base_url\n self.default_headers = default_headers or {}\n self.default_params = default_params or {}\n\n self.http_client = Client(\n base_url=self.base_url, headers=default_headers, params=self.default_params\n )\n\n def get(self, url: str, params: dict, headers: dict = None):\n custom_headers = headers or {}\n\n if not params.get(\"_rticket\"):\n params[\"_rticket\"] = int(round(time() * 1000))\n\n response = self.http_client.get(url=url, params=params, headers=custom_headers)\n\n return response\n\n def post(self, url: str, data: dict, headers: dict = None):\n custom_headers = headers or {}\n\n rticket = int(round(time() * 1000))\n\n response = self.http_client.post(\n url=url, params={\"_rticket\": rticket}, data=data, headers=custom_headers\n )\n\n return response\n", "sub_path": "tiktok_bot/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.Optional", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 12, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "413247640", "text": "\"\"\"\n python3 readgraph.py [ digraph-file ]\n\nTakes a csv (comma separated values) text file containing the vertices\nand edges of a street digraph and converts it into a digraph instance.\n\nIf the optional argument digraph-file is supplied, reads that, otherwise\ntakes input from stdin\n\"\"\"\n# import sys\n\n# # throw away executable name before processing command line arguments\n# argv = sys.argv[1:]\n\n# # if filename is supplied, use that, otherwise use stdin\n# if argv:\n# digraph_file_name = argv.pop(0)\n# digraph_file = open(digraph_file_name, 'r')\n# else:\n# digraph_file = sys.stdin\n\n# For testing, just use a simple representation of set of vertices, set of\n# edges as ordered pairs, and dctionaries that map\n# vertex to (lat,long)\n# edge to street name\n\nimport logging\nfrom digraph import Digraph\n\n\ndef readgraph(digraph_file_name):\n # create logger\n readgraph_logger = logging.getLogger('MappingServer.readgraph')\n\n readgraph_logger.info(\"Opening graphfile:\" + str(digraph_file_name))\n digraph_file = open(digraph_file_name, 'r')\n readgraph_logger.info(\"Open successful.\")\n\n V = set()\n E = set()\n V_coord = {}\n E_name = {}\n\n G = Digraph()\n\n readgraph_logger.info(\"Parsing file...\")\n # process each line in the file\n for line in digraph_file:\n\n # strip all trailing whitespace\n line = line.rstrip()\n\n fields = line.split(\",\")\n type = fields[0]\n\n if type == 'V':\n # got a vertex record\n (id, lat, long) = fields[1:]\n\n # vertex id's should be ints\n id = int(id)\n\n # lat and long are floats\n lat = float(lat)\n long = float(long)\n\n V.add(id)\n V_coord[id] = (lat, long)\n\n elif type == 'E':\n # got an edge record\n (start, stop, name) = fields[1:]\n\n # vertices are ints\n start = int(start)\n stop = int(stop)\n e = (start, stop)\n\n # get rid of leading and trailing quote \" chars around name\n name = name.strip('\"')\n\n # consistency check, we don't want auto adding of vertices when\n # adding an edge.\n if start not in V or stop not in V:\n readgraph_logger.error(\"Edge {} has an endpoint that is not a vertex\".format(e))\n raise Exception(\"Edge {} has an endpoint that is not a vertex\".format(e))\n\n G.add_edge(e)\n E_name[e] = name\n else:\n # weird input\n readgraph_logger.error(\"Error: weird line |{}|\".format(line))\n raise Exception(\"Error: weird line |{}|\".format(line))\n\n readgraph_logger.info(\"Parsing finished.\")\n readgraph_logger.debug(\"Graph has \" + str(G.num_vertices()) + \" vertices and \" + str(G.num_edges()) + \" edges\")\n\n V_Rev = {}\n\n for key in V_coord:\n V_Rev[key] = (int(V_coord[key][0] * 100000), int(V_coord[key][1] * 100000))\n\n V_coord_rev = dict([(v, k) for (k, v) in V_Rev.items()])\n\n names = (V_coord, E_name, V_coord_rev)\n\n return (G, names)\n", "sub_path": "readgraph.py", "file_name": "readgraph.py", "file_ext": "py", "file_size_in_byte": 3090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "digraph.Digraph", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "169804472", "text": "import sys\nfrom datetime import datetime\nimport time\nimport re\n\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import Factory, ClientFactory\nfrom twisted.web.client import getPage\n\nfrom project_config import API_KEY\n\n# Server info dictionary containing port numbers and all adjacent servers for\n# each server in the herd.\nserver_info = {\n\t'Alford' \t: { 'port' : 12000, 'adjacent_servers' : ['Parker', 'Powell'] \t\t\t\t},\n\t'Bolden' \t: { 'port' : 12001, 'adjacent_servers' : ['Parker', 'Powell'] \t\t\t\t},\n\t'Hamilton' \t: { 'port' : 12002, 'adjacent_servers' : ['Parker'] \t\t\t\t\t\t},\n\t'Parker' \t: { 'port' : 12003, 'adjacent_servers' : ['Alford', 'Bolden', 'Hamilton'] \t},\n\t'Powell' \t: { 'port' : 12004, 'adjacent_servers' : ['Alford', 'Bolden'] \t\t\t\t}\n}\n\n# Server protocol for the proxy herd\nclass ProxyHerdProtocol(LineReceiver):\n\tdef __init__(self, factory):\n\t\tself.factory = factory\n\t\tself.name = None\n\t\tself.connectionID = -1\n\t\tself.GooglePlacesURL = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'\n\n\n\tdef connectionMade(self):\n\t\tself.connectionID = self.factory.numConnectionsReceived\n\t\tself.factory.numConnectionsReceived += 1\n\t\tself.logMessage('CONNECTION #' + str(self.connectionID) + ' made. Time: ' + str(datetime.now()))\n\n\n\tdef connectionLost(self, reason):\n\t\tself.logMessage('CONNECTION #' + str(self.connectionID) + ' lost. Time: ' + str(datetime.now()))\n\n\n\tdef lineReceived(self, msg):\n\t\tself.logMessage('RECEIVED message: ' + msg)\n\t\t# Splits the message by whitespace\n\t\tmsg_list = msg.split()\n\t\tif (msg_list == []):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\t# Determine the type of command\n\t\tcmd_name = msg_list[0]\n\t\tif (cmd_name == 'IAMAT' and len(msg_list) == 4):\n\t\t\tself.processIAMATcommand(msg)\n\t\telif (cmd_name == 'WHATSAT' and len(msg_list) == 4):\n\t\t\tself.processWHATSATcommand(msg)\n\t\telif (cmd_name == 'AT' and len(msg_list) == 8):\n\t\t\tself.processATcommand(msg)\n\t\telif (cmd_name == 'INIT_QUERY' and len(msg_list) == 3):\n\t\t\tself.processINIT_QUERYcommand(msg)\n\t\telse:\n\t\t\tself.processInvalidCommand(msg)\n\n\n\tdef processInvalidCommand(self, msg):\n\t\tInvldResponse = '? 
' + str(msg)\n\t\tself.sendLine(InvldResponse)\n\t\tself.logMessage('SENT invalid command notification: ' + InvldResponse)\n\n\n\t# Command received from adjacent server who has just come online and wants to\n\t# obtain existing user location information\n\tdef processINIT_QUERYcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\tif (msg_list[1] != 'FROM' or\n\t\t\tmsg_list[2] not in server_info[self.factory.serverID]['adjacent_servers']):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\tsender_serverID = msg_list[2]\n\t\tfor ATmessage in self.factory.users.values():\n\t\t\treactor.connectTCP('localhost', server_info[sender_serverID]['port'],\n\t\t\t\t\t\t\t\tLocationPropagationFactory(self.factory.serverID, ATmessage))\n\t\t\tself.logMessage('SENT location information to server ' + sender_serverID +\n\t\t\t\t\t\t\t' following INIT_QUERY: ' + ATmessage)\n\n\n\tdef processIAMATcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\t# Match for the latitude and longitude\n\t\tlatlon_match = re.match('^(\\+|-)\\d+.\\d+(\\+|-)\\d+.\\d+$', msg_list[2])\n\t\t# Match for the time the client thinks it sent the message\n\t\ttime_match = re.match('^\\d+.\\d+$', msg_list[3])\n\t\tif (latlon_match == None or time_match == None):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\t# Calculate the time difference between the server's idea of when it\n\t\t# received the message and the client's timestamp\n\t\ttime_diff = time.time() - float(time_match.group())\n\t\ttime_diff_sign = ''\n\t\tif time_diff >= 0:\n\t\t\ttime_diff_sign = '+'\n\t\tIAMATdata = ' '.join(msg_list[1:])\n\t\t# Formulate the AT response message\n\t\tATresponse = (\t'AT ' + self.factory.serverID + ' ' +\n\t\t\t\t\t\ttime_diff_sign + str(time_diff) + ' ' + IAMATdata\t)\n\t\t# Set the protocols name to the client ID\n\t\tself.name = msg_list[1]\n\t\t# Set an entry in the users dictionary with the client ID as the key\n\t\t# and the At response message as the value\n\t\tself.factory.users[self.name] = ATresponse\n\t\tself.sendLine(ATresponse)\n\t\tself.logMessage('SENT AT response to user ' + self.name + ' following IAMAT command: ' + ATresponse)\n\t\t# Propagate the AT response to adjacent servers\n\t\tself.propagateLocationUpdate(ATresponse)\n\n\t\t\t\n\tdef processWHATSATcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\t# Match on the radius and the upper bound of information to provide\n\t\t# (not actually used)\n\t\tif (re.match('^\\d+$', msg_list[2]) == None or\n\t\t\tre.match('^\\d+$', msg_list[3]) == None):\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\tother_client_name = msg_list[1]\n\t\t# Check that the other client name provided is actually one of this\n\t\t# server's users\n\t\tif self.factory.users.has_key(other_client_name) == False:\n\t\t\tself.processInvalidCommand(msg)\n\t\t\treturn\n\n\t\t# Get the appropriate AT response to send back to the client\n\t\tATresponse = self.factory.users[other_client_name]\n\t\tself.sendLine(ATresponse)\n\t\tself.logMessage('SENT AT response to user following WHATSAT command: ' + ATresponse)\n\n\t\t# Match for the latitude and longitude\n\t\tlatlon_match = re.match('^((\\+|-)\\d+.\\d+)((\\+|-)\\d+.\\d+)$', ATresponse.split()[4])\n\t\t# Formulate the Google Places query URL\n\t\tpageURL = self.GooglePlacesURL + 'location=' + latlon_match.group(1) + ',' + latlon_match.group(3) + '&' + 'radius=' + msg_list[2] + '&' + 'sensor=false&' + 'key=' + API_KEY\n\t\td = getPage(pageURL)\n\t\td.addCallbacks(self.sendGooglePlacesResponse, 
self.sendGooglePlacesErrorNotification)\n\n\tdef sendGooglePlacesResponse(self, response):\n\t\t# Replace every sequence of two or more newlines with a single newline\n\t\tresponse = re.sub('\\n\\n+', '\\n', response)\n\t\t# Replace all trailing newline with two newlines\n\t\tresponse = re.sub('\\n*$', '\\n', response)\n\t\tself.sendLine(response)\n\t\tself.logMessage('SENT Google Places response message:\\n' + response)\n\n\t# Send an error notification if the Google Places query failed\n\tdef sendGooglePlacesErrorNotification(self, response):\n\t\terror_msg = 'Error: Could not retrieve Google Places information for the given request.'\n\t\tself.sendLine(error_msg)\n\t\tself.logMessage('SENT Google Places error notification: ' + error_msg)\n\n\n\tdef processATcommand(self, msg):\n\t\tmsg_list = msg.split()\n\t\t# Check that the AT message is valid\n\t\t# - valid server ID\n\t\t# - valid time difference\n\t\t# - valid latitude and longitude\n\t\t# - valid user timestamp\n\t\t# - FROM valid server ID\n\t\tif (not server_info.has_key(msg_list[1]) or\n\t\t\tre.match('^(\\+|-)\\d+.\\d+$', msg_list[2]) == None or\n\t\t\tre.match('^(\\+|-)\\d+.\\d+(\\+|-)\\d+.\\d+$', msg_list[4]) == None or\n\t\t\tre.match('^\\d+.\\d+$', msg_list[5]) == None or\n\t\t\tmsg_list[6] != 'FROM' or\n\t\t\tmsg_list[7] not in server_info[self.factory.serverID]['adjacent_servers']):\n\t\t\tself.sendLine('Error: Invalid AT message sent to server ' + self.factory.serverID + '.')\n\t\t\tself.logMessage('ERROR: Invalid AT message received from server.')\n\t\t\treturn\n\n\t\tsender_serverID = msg_list[7]\n\t\tclientID = msg_list[3]\n\t\tmsg = ' '.join(msg_list[:6])\n\t\t# If the information for the client ID present in the AT message is\n\t\t# already present in this server's users dictionary, don't update\n\t\t# or propagate\n\t\tif self.factory.users.has_key(clientID) and self.factory.users[clientID] == msg:\n\t\t\tself.logMessage('IGNORED the propagated location update from ' + sender_serverID + '.')\n\t\t\treturn\n\n\t\t# Update user information\n\t\tself.factory.users[clientID] = msg\n\t\t# Propagate AT message\n\t\tself.propagateLocationUpdate(msg)\n\n\t# Propagates a location update to adjacent servers\n\tdef propagateLocationUpdate(self, ATmessage):\n\t\tadjacent_servers = server_info[self.factory.serverID]['adjacent_servers']\n\t\tfor s in adjacent_servers:\n\t\t\treactor.connectTCP('localhost', server_info[s]['port'], LocationPropagationFactory(self.factory.serverID, ATmessage))\n\t\t\tself.logMessage('PROPAGATED location update to server ' + s + ': ' + ATmessage)\n\n\tdef logMessage(self, msg):\n\t\tlogfile = open(self.factory.logfilename, 'a')\n\t\tlogfile.write(msg + '\\n\\n')\n\t\tlogfile.close()\n\n\n# Server factory\nclass ProxyHerdFactory(Factory):\n\tdef __init__(self, serverID):\n\t\tself.users = {}\n\t\tself.serverID = serverID\n\t\tself.numConnectionsReceived = 0\n\t\tself.logfilename = serverID + '-' + str(datetime.now()) + '.log'\n\t\tprint('Initializing server...\\nCreating logfile \\\"' + self.logfilename + '\\\".')\n\t\t# Create logfile\n\t\tlogfile = open(self.logfilename, 'w')\n\t\t# Query adjacent servers for existing user location information\n\t\tprint('Querying adjacent servers for existing user location information...\\n')\n\t\tadjacent_servers = server_info[self.serverID]['adjacent_servers']\n\t\tfor s in adjacent_servers:\n\t\t\treactor.connectTCP('localhost', server_info[s]['port'],\n\t\t\t\t\t\t\t\tLocationPropagationFactory(self.serverID, 'INIT_QUERY'))\n\t\t\tlogfile.write(\t'SENT 
initial user location information query to server ' +\n\t\t\t\t\t\t\ts + ': ' + 'INIT_QUERY FROM ' + self.serverID + '\\n\\n')\n\n\t\tlogfile.close()\t\t\t\n\n\n\tdef buildProtocol(self, addr):\n\t\treturn ProxyHerdProtocol(self)\n\n\n# Client protocol for the proxy herd for propagating an update or initially\n# querying an adjacent server for user location information upon coming online\nclass LocationPropagationProtocol(LineReceiver):\n\tdef __init__(self, factory):\n\t\tself.factory = factory\n\t\n\tdef connectionMade(self):\n\t\tself.sendLine(self.factory.ATmessage + ' FROM ' + self.factory.sender_serverID)\n\t\tself.transport.loseConnection()\n\n\n# Client factory for propagating location information, both for sending updates\n# and for querying adjacent servers for existing user location information\n# upon coming online.\n# Needs to inherit from \"ClientFactory\" in order to use with \"connectTCP\"\n# function.\nclass LocationPropagationFactory(ClientFactory):\n\tdef __init__(self, sender_serverID, ATmessage):\n\t\t# Server ID of the server who initiated the connection\n\t\tself.sender_serverID = sender_serverID\n\t\tself.ATmessage = ATmessage\n\n\tdef startedConnecting(self, connector):\n\t\treturn\n\n\tdef buildProtocol(self, addr):\n\t\treturn LocationPropagationProtocol(self)\n\n\tdef clientConnectionLost(self, connector, reason):\n\t\treturn\n\n\tdef clientConnectionFailed(self, connector, reason):\n\t\treturn\n\n\ndef main():\n\tif len(sys.argv) != 2:\n\t\tprint('Usage: python proxyherd.py serverID')\n\t\texit()\n\n\tserverID = str(sys.argv[1])\n\n\tif not server_info.has_key(serverID):\n\t\tprint('Error: Invalid serverID')\n\t\texit()\n\t\n\treactor.listenTCP(server_info[serverID]['port'], ProxyHerdFactory(serverID))\n\treactor.run()\n\nif __name__ == '__main__':\n\tmain()\n\n", "sub_path": "proxyherd.py", "file_name": "proxyherd.py", "file_ext": "py", "file_size_in_byte": 10419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "twisted.protocols.basic.LineReceiver", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 81, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 81, "usage_type": "name"}, {"api_name": "re.match", "line_number": 90, "usage_type": "call"}, {"api_name": "re.match", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "re.match", "line_number": 122, "usage_type": "call"}, {"api_name": "re.match", "line_number": 123, "usage_type": "call"}, {"api_name": "re.match", "line_number": 140, "usage_type": "call"}, {"api_name": "project_config.API_KEY", "line_number": 142, "usage_type": "name"}, {"api_name": "twisted.web.client.getPage", "line_number": 143, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 148, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 150, "usage_type": "call"}, {"api_name": "re.match", "line_number": 170, "usage_type": "call"}, {"api_name": "re.match", "line_number": 171, "usage_type": "call"}, {"api_name": "re.match", "line_number": 172, "usage_type": "call"}, {"api_name": 
"twisted.internet.reactor.connectTCP", "line_number": 198, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 198, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.Factory", "line_number": 208, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 221, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 221, "usage_type": "name"}, {"api_name": "twisted.protocols.basic.LineReceiver", "line_number": 235, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.ClientFactory", "line_number": 249, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 269, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 273, "usage_type": "attribute"}, {"api_name": "twisted.internet.reactor.listenTCP", "line_number": 279, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 279, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 280, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 280, "usage_type": "name"}]}
+{"seq_id": "454879132", "text": "import logging\n\nfrom suds.client import Client, WebFault\n\nfrom rest_framework import generics, status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom django.conf import settings\n\nfrom bonus_cards.models import BonusCard\nfrom bonus_cards.serializers import (\n BonusCardBalanceSerializer, BonusCardTransactionsSerializer,\n BonusCardGetUuidSerializer,\n)\n\nLOGGER = logging.getLogger(__name__)\n\nif settings.DEBUG:\n import logging\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('suds.client').setLevel(logging.DEBUG)\n logging.getLogger('suds.transport').setLevel(logging.DEBUG)\n logging.getLogger('suds.xsd.schema').setLevel(logging.DEBUG)\n logging.getLogger('suds.wsdl').setLevel(logging.DEBUG)\n\n\n@api_view(('GET',))\ndef api_root(request):\n return Response({\n 'get_uuid': reverse(\n 'bonus_cards:get_uuid', kwargs={\n 'bonus_program_uuid': 'BONUS_PROGRAM_UUID',\n 'card_number': 'CARD_NUMBER'\n }\n ),\n 'balance': reverse(\n 'bonus_cards:balance', kwargs={'uuid': 'CARD_UUID'}\n ),\n 'transactions': reverse(\n 'bonus_cards:transactions', kwargs={'uuid': 'CARD_UUID'}\n ),\n })\n\n\nclass BonusCardBaseView(generics.RetrieveAPIView):\n def get_wsdl_service(self, wsdl_client):\n raise NotImplementedError\n\n def get_serialized_data(self, wsdl_data):\n raise NotImplementedError\n\n def get_object(self):\n try:\n wsdl_client = Client(settings.ONE_C_WSDL,\n username=settings.ONE_C_WSDL_USER,\n password=settings.ONE_C_WSDL_PASSWORD)\n\n wsdl_response = self.get_wsdl_service(wsdl_client)\n\n except (WebFault, Exception) as e:\n LOGGER.error(e)\n wsdl_response = None\n\n return wsdl_response\n\n def retrieve(self, request, *args, **kwargs):\n wsdl_obj = self.get_object()\n if wsdl_obj:\n if u'Данные' in wsdl_obj:\n serialized_data = self.get_serialized_data(wsdl_obj[u'Данные'])\n # cache.set(request.path, serialized_data, 60)\n return Response(serialized_data)\n\n elif u'_Сообщение' in wsdl_obj:\n message = {'message': wsdl_obj[u'_Сообщение']}\n return Response(message, status.HTTP_404_NOT_FOUND)\n\n else:\n message = {'message': '1C error communication'}\n return Response(message, status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass BonusCardGetUuidView(BonusCardBaseView):\n serializer_class = BonusCardGetUuidSerializer\n\n def get_wsdl_service(self, wsdl_client):\n return wsdl_client.service.BonusCardGetID(\n self.kwargs.get('bonus_program_uuid'),\n self.kwargs.get('card_number'),\n )\n\n def get_serialized_data(self, wsdl_data):\n bonus_card = BonusCard(uuid=wsdl_data[u'Идентификатор'])\n serializer = self.get_serializer(bonus_card)\n return serializer.data\n\n\nclass BonusCardBalanceView(BonusCardBaseView):\n serializer_class = BonusCardBalanceSerializer\n\n def get_wsdl_service(self, wsdl_client):\n return wsdl_client.service.BonusCardInfo(self.kwargs.get('uuid'))\n\n def get_serialized_data(self, wsdl_data):\n balance = wsdl_data[u'Баллы']\n bonus_card = BonusCard(uuid=self.kwargs.get('uuid'), balance=balance)\n serializer = self.get_serializer(bonus_card)\n return serializer.data\n\n\nclass BonusCardTransactionsView(BonusCardBaseView):\n serializer_class = BonusCardTransactionsSerializer\n\n def get_wsdl_service(self, wsdl_client):\n return wsdl_client.service.BonusCardTransactions(\n self.kwargs.get('uuid')\n )\n\n def get_serialized_data(self, wsdl_data):\n transactions = []\n for transaction in wsdl_data[u'ИсторияОпераций']:\n 
transactions.append({\n 'period': transaction[u'Период'],\n 'balance': transaction[u'Баллы'],\n 'comment': transaction[u'Комментарий'],\n })\n\n bonus_card = BonusCard(uuid=self.kwargs.get('uuid'),\n transactions=transactions)\n serializer = self.get_serializer(bonus_card)\n return serializer.data\n", "sub_path": "src/bonus_cards/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4472, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 46, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 46, "usage_type": "name"}, {"api_name": "suds.client.Client", "line_number": 55, "usage_type": "call"}, {"api_name": "django.conf.settings.ONE_C_WSDL", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_C_WSDL_USER", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 56, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_C_WSDL_PASSWORD", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "suds.client.WebFault", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 77, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 77, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 81, "usage_type": "name"}, {"api_name": 
"bonus_cards.serializers.BonusCardGetUuidSerializer", "line_number": 85, "usage_type": "name"}, {"api_name": "bonus_cards.models.BonusCard", "line_number": 94, "usage_type": "call"}, {"api_name": "bonus_cards.serializers.BonusCardBalanceSerializer", "line_number": 100, "usage_type": "name"}, {"api_name": "bonus_cards.models.BonusCard", "line_number": 107, "usage_type": "call"}, {"api_name": "bonus_cards.serializers.BonusCardTransactionsSerializer", "line_number": 113, "usage_type": "name"}, {"api_name": "bonus_cards.models.BonusCard", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "429048619", "text": "from flask import Flask, render_template, request, send_file, url_for\nfrom werkzeug.utils import secure_filename\nimport numpy\nimport calendar\nimport time\nfrom custom_util import *\n\napp=Flask(__name__)\n\n# get running path\nbase_dir = os.path.dirname(__file__)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/success\", methods=['POST'])\ndef success():\n if request.method=='POST':\n filestr=request.files[\"file\"]\n\n #convert string data to numpy array\n npimg = numpy.frombuffer(filestr.read(), numpy.uint8)\n\n # convert numpy array to image\n img = cv2.imdecode(npimg, cv2.COLOR_RGB2BGR)\n\n image_predicted = predict_Luna_Ju(img)\n\n file_to_save = str(calendar.timegm(time.gmtime()))\n\n cv2.imwrite(os.path.join(base_dir, 'static', file_to_save + '.jpg'), image_predicted)\n\n image_file = url_for('static', filename=file_to_save + '.jpg')\n\n\n return render_template(\"success.html\", img = image_file)\n\n\nif __name__ == '__main__':\n app.run(port=80)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.frombuffer", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 23, "usage_type": "attribute"}, {"api_name": "calendar.timegm", "line_number": 30, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "92752113", "text": "import fcntl\nimport sys\nimport os\nimport time\nimport tty\nimport termios\nimport random\nimport subprocess\nimport numpy as np\nimport cv2\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\n#from matplotlib import pyplot as plt\n\nclass RawStream(object):\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n def __enter__(self):\n self.original_stty = termios.tcgetattr(self.stream)\n tty.setcbreak(self.stream)\n def __exit__(self, type, value, traceback):\n termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)\n\nclass NonBlockingStream(object):\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n def __enter__(self):\n self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)\n fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)\n def __exit__(self, *args):\n fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)\n\n\nclass GCodeInstruction:\n def __init__(self, cmd, params):\n self.cmd_num = int(cmd[1:])\n self.cmd = cmd[0]\n self.params = params\n\n def toString(self):\n param_str = \"\"\n for key,value in self.params.items():\n param_str+=key+(\"{:.2f}\".format(value))+\" \"\n return self.cmd+str(self.cmd_num)+\" \"+param_str[:-1]\n\n @classmethod\n def parse(cls, str):\n words = str.strip().split(' ')\n cmd = words[0]\n if len(cmd)<2:\n return None\n params = {}\n for f in words[1:]:\n key = f[0]\n value = float(f[1:])\n params[key] = value\n return cls(cmd, params)\n\nclass Gcode:\n def __init__(self, instructions):\n self.instructions = instructions\n\n def bounds(self):\n max_x = 0\n max_y = 0\n min_y = sys.maxsize\n min_x = sys.maxsize\n for i in self.instructions:\n if 'X' in i.params:\n x = i.params['X']\n # if 'I' in i.params:\n # x+=i.params['I']\n if x>max_x:\n max_x = x \n if xmax_y:\n max_y = y \n if ySample:\n image_file = self.list_images[item]\n\n with open(image_file + \".cat\", \"r\") as f:\n content = f.read()\n\n pos = [int(e) for e in content.split(\" \") if e != '']\n\n pos = array(pos)[1:]\n\n x = pos[::2]\n\n y = pos[1::2]\n\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n return Sample(\n x=cv2.imread(image_file),\n y=array([xmin, xmax, ymin, ymax])\n )\n\n\n\n", "sub_path": "examples/cat_detection/data/cat_loader.py", "file_name": "cat_loader.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nautilus.dataset.dataset.Dataset", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 25, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 25, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ma.array", "line_number": 44, "usage_type": "call"}, {"api_name": "nautilus.data.sample.sample.Sample", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.ma.array", "line_number": 55, "usage_type": "call"}, {"api_name": "nautilus.data.sample.sample.Sample", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "496769857", "text": "import math\nfrom neuron import neuron\nclass layer:\n def __init__(self, size, numIn):\n w_initialization = [0 - math.sqrt(1 / numIn), math.sqrt(1 / numIn)]\n neurons = []\n for i in range(size):\n neurons.append(neuron(0, numIn, w_initialization))\n\n self.neurons = neurons\n self.size = size\n self. numIn = numIn\n \n def activate(self, inputValues):\n if len(inputValues) != self.numIn:\n print(\"Error: Number of inputs does not match layer input parametres\")\n return None\n\n for neuronIndex in range(len(self.neurons)):\n self.neurons[neuronIndex].activate(inputValues)", "sub_path": "layer.py", "file_name": "layer.py", "file_ext": "py", "file_size_in_byte": 668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "math.sqrt", "line_number": 5, "usage_type": "call"}, {"api_name": "neuron.neuron", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "23136834", "text": "import torch\nimport torch.onnx\nfrom torch.utils import data\nimport torchvision\nimport torchvision.models as models\nimport numpy as np\nimport cv2\nimport pytesseract\nfrom dataset import custom_dataset\nfrom imutils.object_detection import non_max_suppression\nimport sys\nimport os\nimport copy\nfrom model import EAST\nfrom matplotlib import pyplot as plt\n\ndef predictions(prob_score, geo, min_confidence):\n\t(numR, numC) = prob_score.shape[2:4]\n\tboxes = []\n\tconfidence_val = []\n\n\t# loop over rows\n\tfor y in range(0, numR):\n\t\tscoresData = prob_score[0, 0, y]\n\t\tx0 = geo[0, 0, y]\n\t\tx1 = geo[0, 1, y]\n\t\tx2 = geo[0, 2, y]\n\t\tx3 = geo[0, 3, y]\n\t\tanglesData = geo[0, 4, y]\n\n\t\t# loop over the number of columns\n\t\tfor i in range(0, numC):\n\t\t\tif scoresData[i] < min_confidence:\n\t\t\t\t#print (scoresData[i])\n\t\t\t\t#print ('Low Confidence!')\n\t\t\t\tcontinue\n\n\t\t\t(offX, offY) = (i * 4.0, y * 4.0)\n\n\t\t\t# extracting the rotation angle for the prediction and computing the sine and cosine\n\t\t\tangle = anglesData[i]\n\t\t\tcos = np.cos(angle)\n\t\t\tsin = np.sin(angle)\n\n\t\t\t# using the geo volume to get the dimensions of the bounding box\n\t\t\th = x0[i] + x2[i]\n\t\t\tw = x1[i] + x3[i]\n\n\t\t\t# compute start and end for the text pred bbox\n\t\t\tendX = int(offX + (cos * x1[i]) + (sin * x2[i]))\n\t\t\tendY = int(offY - (sin * x1[i]) + (cos * x2[i]))\n\t\t\tstartX = int(endX - w)\n\t\t\tstartY = int(endY - h)\n\n\t\t\tboxes.append((startX, startY, endX, endY))\n\t\t\tconfidence_val.append(scoresData[i])\n\n\t# return bounding boxes and associated confidence_val\n\treturn (boxes, confidence_val)\n\ndef connect_horizontal_boxes(boxes, x_threshold=30, y_threshold=30):\n\tboxes_copy = boxes.copy()\n\tbox_it = sorted(boxes_copy, key=lambda tup: tup[0])\n\n\tdone = False\n\twhile (done == False):\n\t\tmerger = (1e6, 1e6)\n\t\tbox_to_merge = (0, 0, 0, 0)\n\t\tfound = False\n\t\ti = 0\n\t\tfor box in box_it:\n\t\t\t(start_X, start_Y, end_X, end_Y) = box\n\t\t\tj = 0\n\t\t\tfor new_box in box_it:\n\t\t\t\tif (i < j):\n\t\t\t\t\t(start_Xn, start_Yn, end_Xn, end_Yn) = new_box\n\t\t\t\t\tstartYdiff = np.abs(start_Yn - start_Y)\n\t\t\t\t\tendYdiff = np.abs(end_Yn - end_Y)\n\t\t\t\t\tYdiff = startYdiff + endYdiff\n\t\t\t\t\tif (Ydiff < y_threshold):\n\t\t\t\t\t\tXdiff = np.abs(start_Xn - end_X) \n\t\t\t\t\t\tif ((start_Xn <= end_X) or (Xdiff < x_threshold)):\n\t\t\t\t\t\t\tmerger = (i, j)\n\t\t\t\t\t\t\tsY = np.minimum(start_Y, start_Yn)\n\t\t\t\t\t\t\teY = np.maximum(end_Y, end_Yn)\n\t\t\t\t\t\t\tfound = True\n\n\t\t\t\t\t\t\tif (start_Xn <= end_X):\n\t\t\t\t\t\t\t\teX = np.maximum(end_X, end_Xn)\n\t\t\t\t\t\t\t\tbox_to_merge = (start_X, sY, eX, eY)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tbox_to_merge = (start_X, sY, end_Xn, eY)\n\t\t\t\t\t\t\tbreak\n\t\t\t\tj += 1\n\t\t\tif (found == True):\n\t\t\t\tbreak\n\t\t\ti += 1\n\n\t\t#delete merger, and add new box, assume i before j\n\t\tif (found == True):\n\t\t\tbox_change = copy.deepcopy(box_it)\n\t\t\tbox_change.pop(merger[0])\n\t\t\tbox_change.pop(merger[1]-1)\n\t\t\tbox_change.append(box_to_merge)\n\t\t\tbox_change = sorted(box_change, key=lambda tup: tup[0])\n\t\t\tbox_it = copy.deepcopy(box_change)\n\t\telse:\n\t\t\tdone = True\n\n\treturn box_it\n\ndef process_image(image_read, image_real, east, min_confidence, width, height, hyst_X=0, hyst_Y=0, offset_X=0, offset_Y=0, remove_boxes=False):\n\n\t#unnecessary default\n\targs = 
{\"image\":\"/Users/surajmenon/Desktop/findocDocs/apple_test1.png\", \"east\":\"/Users/surajmenon/Desktop/findocDocs/frozen_east_text_detection.pb\", \"min_confidence\":0.5, \"width\":320, \"height\":320}\n\n\targs['image'] = image_real\n\targs['east'] = east\n\targs['min_confidence'] = min_confidence\n\targs['width'] = width\n\targs['height'] = height\n\n\tif (image_read == True):\n\t\timage = cv2.imread(args['image'])\n\telse:\n\t\timage = args['image']\n\n\t#print ('Processing Image')\n\t#print (image.shape)\n\tprint ('.')\n\n\n\t#Saving a original image and shape\n\torig = image.copy()\n\t(origH, origW) = image.shape[:2]\n\n\t# print ('Image Size')\n\t# print (origH)\n\t# print (origW)\n\t# exit()\n\n\t# set the new height and width to default 320 by using args #dictionary. \n\t(newW, newH) = (args[\"width\"], args[\"height\"])\n\n\t#Calculate the ratio between original and new image for both height and weight. \n\t#This ratio will be used to translate bounding box location on the original image. \n\trW = origW / float(newW)\n\trH = origH / float(newH)\n\n\t# resize the original image to new dimensions\n\timage = cv2.resize(image, (newW, newH))\n\t(H, W) = image.shape[:2]\n\n\tnet = args[\"east\"]\n\n\tblob = cv2.dnn.blobFromImage(image, 1.0, (W, H),\n\t(123.68, 116.78, 103.94), swapRB=True, crop=False)\n\n\t# construct a blob from the image to forward pass it to EAST model\n\t# blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),\n\t# \t(123.68, 116.78, 103.94), swapRB=True, crop=False)\n\n\t# net = cv2.dnn.readNet(args[\"east\"])\n\n\t# We would like to get two outputs from the EAST model. \n\t#1. Probabilty scores for the region whether that contains text or not. \n\t#2. Geometry of the text -- Coordinates of the bounding box detecting a text\n\t# The following two layer need to pulled from EAST model for achieving this. 
\n\t# layerNames = [\n\t# \t\"feature_fusion/Conv_7/Sigmoid\",\n\t# \t\"feature_fusion/concat_3\"]\n\n\t# net.setInput(blob)\n\t#(scores, geometry) = net.forward(layerNames)\n\n\tprint (blob.shape)\n\t#image_r = image.reshape(1, 3, H, W)\n\tprint (blob.dtype)\n\tprint (blob.shape)\n\timage_r_pt = torch.from_numpy(blob)\n\tprint (image_r_pt.shape)\n\tprint (image_r_pt.dtype)\n\timage_r_pt = image_r_pt.type(torch.FloatTensor)\n\t(scores, geometry) = net(image_r_pt)\n\tprint (scores.shape)\n\tprint (geometry.shape)\n\n\tscores_n = scores.detach().cpu().numpy()\n\tgeometry_n = geometry.detach().cpu().numpy()\n\n\t(boxes, confidence_val) = predictions(scores_n, geometry_n, args['min_confidence'])\n\tboxes = non_max_suppression(np.array(boxes), probs=confidence_val)\n\n\t##Text Detection and Recognition \n\n\t# initialize the list of results\n\tresults = []\n\t\n\t#for now, say we don't want any X-shifting\n\tx_start_buffer = 0\n\n\t#boxes = connect_horizontal_boxes(boxes, x_threshold=50, y_threshold=20) \n\tadjusted_boxes = []\n\n\t# loop over the bounding boxes to find the coordinate of bounding boxes\n\tfor (startX, startY, endX, endY) in boxes:\n\t\t# scale the coordinates based on the respective ratios in order to reflect bounding box on the original image\n\t\tstartX = int(startX * rW) - hyst_X - x_start_buffer\n\t\tstartY = int(startY * rH) - hyst_Y \n\t\tendX = int(endX * rW) + hyst_X - x_start_buffer\n\t\tendY = int(endY * rH) + hyst_Y \n\n\t\t#bound the bound\n\t\tif (startX < 0):\n\t\t\tstartX = 0\n\t \n\t\tif (startY < 0):\n\t\t\tstartY = 0\n\n\t\tif (endX > origW):\n\t\t\tendX = origW-1\n\t\tif (endY > origH):\n\t\t\tendY = origH-1\n\n\t\tadjusted_box = (startX, startY, endX, endY)\n\t\tadjusted_boxes.append(adjusted_box)\n\n\t#adjusted_boxes = connect_horizontal_boxes(adjusted_boxes, x_threshold=5, y_threshold=15) \n\n\tfor (startX, startY, endX, endY) in adjusted_boxes:\n\t\t#extract the region of interest\n\t\tr = orig[startY:endY, startX:endX]\n\n\t\t#configuration setting to convert image to string. 
\n\t\t#configuration = (\"-l eng --oem 1 --psm 8\")\n\t\tconfiguration = (\"-l eng --oem 1 --psm 7\")\n\t ##This will recognize the text from the image of bounding box\n\n\n\t\ttry:\n\t\t\ttext = pytesseract.image_to_string(r, config=configuration)\n\t\texcept:\n\t\t\tprint ('Some bounding box out of order')\n\t\t\ttext = 'GHAJEFKJEKAFJEKFAJEFKEJKFAEK'\n\n\t\t# append bbox coordinate and associated text to the list of results \n\t\tresults.append(((startX, startY, endX, endY), text))\n\n\treturn orig, results\n\ndef show_image(image, results):\n\n\t#Display the image with bounding box and recognized text\n\t#orig_image = orig.copy()\n\torig_image = image.copy()\n\n\t# Moving over the results and display on the image\n\tfor ((start_X, start_Y, end_X, end_Y), text) in results:\n\t\t# display the text detected by Tesseract\n\t\tprint(\"{}\\n\".format(text))\n\n\t\t# Displaying text\n\t\ttext = \"\".join([x if ord(x) < 128 else \"\" for x in text]).strip()\n\t\tcv2.rectangle(orig_image, (start_X, start_Y), (end_X, end_Y),\n\t\t\t(0, 0, 255), 2)\n\t\tcv2.putText(orig_image, text, (start_X, start_Y - 30),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7,(0,0, 255), 2)\n\n\tplt.imshow(orig_image)\n\tplt.title('Output')\n\tplt.show()\n\nmodel_name = './pths/east_vgg16.pth'\n#model_name = './pths/sm2-300.pth'\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel = EAST(False).to(device)\nmodel.load_state_dict(torch.load(model_name, map_location=torch.device('cpu')))\n\n# set the model to inference mode\nmodel.eval()\n\n#img_path = \"/Users/surajmenon/Desktop/findocDocs/apple_tc_full1.png\"\nimg_path = \"/Users/surajmenon/Desktop/findocDocs/test_image1.jpg\"\nmin_confidence = .99\nheight = 512\nwidth = 512\n\nprocess_date_x = 15\nprocess_date_y = 5\n\nr_image, results = process_image(True, img_path, model, min_confidence, height, width, hyst_X=process_date_x, hyst_Y=process_date_y)\nshow_image(r_image, results)\n\n", "sub_path": "EAST-master-torch/model_test.py", "file_name": "model_test.py", "file_ext": "py", "file_size_in_byte": 8565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.cos", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 89, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 101, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 183, "usage_type": "attribute"}, {"api_name": "imutils.object_detection.non_max_suppression", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": 
"pytesseract.image_to_string", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 264, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 266, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 267, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 275, "usage_type": "attribute"}, {"api_name": "model.EAST", "line_number": 276, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 277, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 280, "usage_type": "call"}]}
+{"seq_id": "131989729", "text": "# -*- coding: utf-8 -*-\nimport MySQLdb\nimport MySQLdb.cursors\nfrom twisted.enterprise import adbapi\nfrom scrapy.utils.project import get_project_settings\n# import shortuuid\n# import uuid\n\n\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n'''\n\n'''\nSETTINGS = get_project_settings()\n\nclass BrotherwatchingPipeline(object):\n\n def __init__(self):\n self.dbpool = adbapi.ConnectionPool ('MySQLdb',\n host=SETTINGS['DB_HOST'],\n user=SETTINGS['DB_USER'],\n passwd=SETTINGS['DB_PASSWD'],\n port=SETTINGS['DB_PORT'],\n db=SETTINGS['DB_DB'],\n charset='utf8',\n use_unicode = True,\n cursorclass=MySQLdb.cursors.DictCursor\n )\n\n def __del__(self):\n self.dbpool.close()\n\n def process_item(self,item,spider):\n sql='INSERT IGNORE INTO app_review (%s) VALUES (%s)'\n keys = item.keys()\n rows=', '.join(keys)\n values = ','.join(['\\'%s\\'' % item[k] for k in keys])\n self.dbpool.runOperation(sql % (rows,values))\n return item\n", "sub_path": "BrotherWatching/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scrapy.utils.project.get_project_settings", "line_number": 18, "usage_type": "call"}, {"api_name": "twisted.enterprise.adbapi.ConnectionPool", "line_number": 23, "usage_type": "call"}, {"api_name": "twisted.enterprise.adbapi", "line_number": 23, "usage_type": "name"}, {"api_name": "MySQLdb.cursors", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "445210687", "text": "# -*- coding: utf-8 -*-\n\"\"\"Simple models for super resolution such as linear interp models.\"\"\"\nimport numpy as np\nimport logging\nfrom inspect import signature\nimport os\nimport json\nfrom sup3r.utilities.utilities import st_interp\nfrom sup3r.models.abstract import AbstractInterface\n\nlogger = logging.getLogger(__name__)\n\n\nclass LinearInterp(AbstractInterface):\n \"\"\"Simple model to do linear interpolation on the spatial and temporal axes\n \"\"\"\n\n def __init__(self, features, s_enhance, t_enhance, t_centered=False):\n \"\"\"\n Parameters\n ----------\n features : list\n List of feature names that this model will operate on for both\n input and output. This must match the feature axis ordering in the\n array input to generate().\n s_enhance : int\n Integer factor by which the spatial axes is to be enhanced.\n t_enhance : int\n Integer factor by which the temporal axes is to be enhanced.\n t_centered : bool\n Flag to switch time axis from time-beginning (Default, e.g.\n interpolate 00:00 01:00 to 00:00 00:30 01:00 01:30) to\n time-centered (e.g. interp 01:00 02:00 to 00:45 01:15 01:45 02:15)\n \"\"\"\n\n self._features = features\n self._s_enhance = s_enhance\n self._t_enhance = t_enhance\n self._t_centered = t_centered\n\n @classmethod\n def load(cls, model_dir, verbose=False):\n \"\"\"Load the LinearInterp model with its params saved to the model_dir\n created with LinearInterp.save(model_dir)\n\n Parameters\n ----------\n model_dir : str\n Directory to load LinearInterp model files from. Must\n have a model_params.json file containing \"meta\" key with all of the\n class init args.\n verbose : bool\n Flag to log information about the loaded model.\n\n Returns\n -------\n out : LinearInterp\n Returns an initialized LinearInterp model\n \"\"\"\n fp_params = os.path.join(model_dir, 'model_params.json')\n assert os.path.exists(fp_params), f'Could not find: {fp_params}'\n with open(fp_params, 'r') as f:\n params = json.load(f)\n\n meta = params['meta']\n args = signature(cls.__init__).parameters\n kwargs = {k: v for k, v in meta.items() if k in args}\n model = cls(**kwargs)\n\n if verbose:\n logger.info('Loading LinearInterp with meta data: {}'\n .format(model.meta))\n\n return model\n\n @property\n def meta(self):\n \"\"\"Get meta data dictionary that defines the model params\"\"\"\n return {'features': self._features,\n 's_enhance': self._s_enhance,\n 't_enhance': self._t_enhance,\n 't_centered': self._t_centered,\n 'training_features': self.training_features,\n 'output_features': self.output_features,\n 'class': self.__class__.__name__,\n }\n\n @property\n def training_features(self):\n \"\"\"Get the list of input feature names that the generative model was\n trained on.\n \"\"\"\n return self._features\n\n @property\n def output_features(self):\n \"\"\"Get the list of output feature names that the generative model\n outputs\"\"\"\n return self._features\n\n def save(self, out_dir):\n \"\"\"\n Parameters\n ----------\n out_dir : str\n Directory to save linear model params. This directory will be\n created if it does not already exist.\n \"\"\"\n self.save_params(out_dir)\n\n # pylint: disable=unused-argument\n def generate(self, low_res, norm_in=False, un_norm_out=False,\n exogenous_data=None):\n \"\"\"Use the generator model to generate high res data from low res\n input. 
This is the public generate function.\n\n Parameters\n ----------\n low_res : np.ndarray\n Low-resolution spatiotemporal input data, a 5D array of shape:\n (n_obs, spatial_1, spatial_2, temporal, n_features)\n norm_in : bool\n This doesnt do anything for this LinearInterp, but is\n kept to keep the same interface as Sup3rGan\n un_norm_out : bool\n This doesnt do anything for this LinearInterp, but is\n kept to keep the same interface as Sup3rGan\n exogenous_data : list\n This doesnt do anything for this LinearInterp, but is\n kept to keep the same interface as Sup3rGan\n\n Returns\n -------\n hi_res : ndarray\n high-resolution spatial output data, a 5D array of shape:\n (n_obs, spatial_1, spatial_2, temporal, n_features)\n \"\"\"\n\n hr_shape = (len(low_res),\n int(low_res.shape[1] * self._s_enhance),\n int(low_res.shape[2] * self._s_enhance),\n int(low_res.shape[3] * self._t_enhance),\n len(self.output_features))\n logger.debug('LinearInterp model with s_enhance of {} '\n 'and t_enhance of {} '\n 'downscaling low-res shape {} to high-res shape {}'\n .format(self._s_enhance, self._t_enhance,\n low_res.shape, hr_shape))\n\n hi_res = np.zeros(hr_shape, dtype=np.float32)\n\n for iobs in range(len(low_res)):\n for idf in range(low_res.shape[-1]):\n hi_res[iobs, ..., idf] = st_interp(low_res[iobs, ..., idf],\n self.s_enhance,\n self.t_enhance,\n t_centered=self._t_centered)\n\n return hi_res\n", "sub_path": "sup3r/models/linear.py", "file_name": "linear.py", "file_ext": "py", "file_size_in_byte": 5814, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "sup3r.models.abstract.AbstractInterface", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 63, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sup3r.utilities.utilities.st_interp", "line_number": 154, "usage_type": "call"}]}
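Shape-wise, generate() above maps (n_obs, spatial_1, spatial_2, temporal, n_features) to (n_obs, spatial_1*s_enhance, spatial_2*s_enhance, temporal*t_enhance, n_features), per its docstring. A quick smoke test of that contract on random data (assumes sup3r is importable; the feature names are placeholders, and the import path is inferred from the record's sub_path):

import numpy as np
from sup3r.models.linear import LinearInterp

model = LinearInterp(features=['u', 'v'], s_enhance=2, t_enhance=3)
low_res = np.random.rand(1, 4, 4, 5, 2).astype(np.float32)
hi_res = model.generate(low_res)
print(hi_res.shape)   # (1, 8, 8, 15, 2)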
+{"seq_id": "10262458", "text": "import plotly.express as px\nimport csv\nimport numpy as np\n\ndef plotfigure(data_path):\n with open(data_path) as csv_file:\n df=csv.DictReader(csv_file)\n fig=px.scatter(df,x=\"Days Present\",y=\"Marks In Percentage\")\n fig.show()\ndef getDataSource(data_path):\n MarksInPercentage=[]\n DaysPresent=[]\n with open(data_path)as csv_file:\n csv_reader=csv.DictReader(csv_file)\n for row in csv_reader:\n MarksInPercentage.append(float(row[\"Marks In Percentage\"]))\n DaysPresent.append(float(row[\"Days Present\"]))\n\n \n return{\"x\":MarksInPercentage,\"y\":DaysPresent}\n\ndef findcorrelation(dataSource):\n correlation=np.corrcoef(dataSource[\"x\"],dataSource[\"y\"])\n print(\"Co relation between Marks and Days Present: \\n=\",correlation[0,1])\n\n\ndef setup():\n data_path=\"data2.csv\"\n dataSource=getDataSource(data_path)\n findcorrelation(dataSource)\n plotfigure(data_path)\nsetup()\n", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 944, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "csv.DictReader", "line_number": 7, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 8, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 8, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "141357636", "text": "#!/usr/bin/python3\nimport itertools\n\n__author__ = 'Pavel Yurgin'\n\nimport bz2\n\n\ndef read_header(wiki):\n header = []\n for line in wiki:\n header.append(line)\n if line.strip() == '':\n return header\n\n\ndef read_page(wiki, skip_redirect=True):\n page = []\n for line in wiki:\n if '#REDIRECT' in line and skip_redirect:\n for line in wiki:\n if line.strip() == '':\n page = []\n break\n else:\n page.append(line)\n if line.strip() == '':\n return page\n\n\ndef split_wiki(input, output, count=float('inf'), skip_redirect=True):\n with bz2.open(input, mode='rt') as input, open(output, 'w', buffering=1024 * 1024) as output:\n header = read_header(input)\n output.writelines(header)\n for i in itertools.count():\n if i > count:\n break\n page = read_page(input, skip_redirect=skip_redirect)\n output.write('\\n'.join(page))\n if i % 1000 == 0 and i != 0:\n print('{} pages processed'.format(i))\n output.write('')\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(\n '''Simple script for getting part of compressed wikipedia dump with ''')\n parser.add_argument('--input', metavar='input', type=str,\n help='path to input compressed wikipedia xml', required=True)\n\n parser.add_argument('--output', metavar='output', type=str,\n help='path to output xml', required=True)\n\n parser.add_argument('--count', metavar='count', type=int, required=True,\n help='page count')\n parser.add_argument('--skip_redirected', metavar='skip_redirected', type=bool,\n help='skip page with redirect')\n\n args = parser.parse_args()\n args = vars(args)\n args = {key: args[key] for key in args if args[key] is not None}\n\n split_wiki(**args)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "dataset/split_wiki_dump.py", "file_name": "split_wiki_dump.py", "file_ext": "py", "file_size_in_byte": 2054, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "bz2.open", "line_number": 32, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 35, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "209043014", "text": "# Copyright (c) 2012-2013 LiuYC https://github.com/liuyichen/\n# Copyright 2012-2014 ksyun.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport base64\nimport datetime\nfrom hashlib import sha256\nfrom hashlib import sha1\nimport hmac\nimport logging\nfrom email.utils import formatdate\nfrom operator import itemgetter\nimport functools\nimport time\nimport calendar\n\nfrom kscore.exceptions import NoCredentialsError\nfrom kscore.utils import normalize_url_path, percent_encode_sequence\nfrom kscore.compat import HTTPHeaders\nfrom kscore.compat import quote, unquote, urlsplit, parse_qs\nfrom kscore.compat import urlunsplit\nfrom kscore.compat import json\nfrom collections import namedtuple\n\n\nimport sys\nimport logging\nimport select\nimport functools\nimport socket\nimport inspect\n\nfrom kscore.compat import six\nfrom kscore.compat import HTTPHeaders, HTTPResponse, urlunsplit, urlsplit\nfrom kscore.exceptions import UnseekableStreamError\nfrom kscore.utils import percent_encode_sequence\nfrom kscore.vendored.requests import models\nfrom kscore.vendored.requests.sessions import REDIRECT_STATI\nfrom kscore.vendored.requests.packages.urllib3.connection import \\\n VerifiedHTTPSConnection\nfrom kscore.vendored.requests.packages.urllib3.connection import \\\n HTTPConnection\nfrom kscore.vendored.requests.packages.urllib3.connectionpool import \\\n HTTPConnectionPool\nfrom kscore.vendored.requests.packages.urllib3.connectionpool import \\\n HTTPSConnectionPool\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass KSHTTPResponse(HTTPResponse):\n # The *args, **kwargs is used because the args are slightly\n # different in py2.6 than in py2.7/py3.\n def __init__(self, *args, **kwargs):\n self._status_tuple = kwargs.pop('status_tuple')\n HTTPResponse.__init__(self, *args, **kwargs)\n\n def _read_status(self):\n if self._status_tuple is not None:\n status_tuple = self._status_tuple\n self._status_tuple = None\n return status_tuple\n else:\n return HTTPResponse._read_status(self)\n\n\nclass KSHTTPConnection(HTTPConnection):\n \"\"\"HTTPConnection that supports Expect 100-continue.\n\n This is conceptually a subclass of httplib.HTTPConnection (though\n technically we subclass from urllib3, which subclasses\n httplib.HTTPConnection) and we only override this class to support Expect\n 100-continue, which we need for S3. As far as I can tell, this is\n general purpose enough to not be specific to S3, but I'm being\n tentative and keeping it in kscore because I've only tested\n this against KSYUN services.\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n HTTPConnection.__init__(self, *args, **kwargs)\n self._original_response_cls = self.response_class\n # We'd ideally hook into httplib's states, but they're all\n # __mangled_vars so we use our own state var. This variable is set\n # when we receive an early response from the server. If this value is\n # set to True, any calls to send() are noops. 
This value is reset to\n # false every time _send_request is called. This is to workaround the\n # fact that py2.6 (and only py2.6) has a separate send() call for the\n # body in _send_request, as opposed to endheaders(), which is where the\n # body is sent in all versions > 2.6.\n self._response_received = False\n self._expect_header_set = False\n\n def close(self):\n HTTPConnection.close(self)\n # Reset all of our instance state we were tracking.\n self._response_received = False\n self._expect_header_set = False\n self.response_class = self._original_response_cls\n\n def _tunnel(self):\n # Works around a bug in py26 which is fixed in later versions of\n # python. Bug involves hitting an infinite loop if readline() returns\n # nothing as opposed to just ``\\r\\n``.\n # As much as I don't like having if py2: code blocks, this seems\n # the cleanest way to handle this workaround. Fortunately, the\n # difference from py26 to py3 is very minimal. We're essentially\n # just overriding the while loop.\n if sys.version_info[:2] != (2, 6):\n return HTTPConnection._tunnel(self)\n\n # Otherwise we workaround the issue.\n self._set_hostport(self._tunnel_host, self._tunnel_port)\n self.send(\"CONNECT %s:%d HTTP/1.0\\r\\n\" % (self.host, self.port))\n for header, value in self._tunnel_headers.iteritems():\n self.send(\"%s: %s\\r\\n\" % (header, value))\n self.send(\"\\r\\n\")\n response = self.response_class(self.sock, strict=self.strict,\n method=self._method)\n (version, code, message) = response._read_status()\n\n if code != 200:\n self.close()\n raise socket.error(\"Tunnel connection failed: %d %s\" %\n (code, message.strip()))\n while True:\n line = response.fp.readline()\n if not line:\n break\n if line in (b'\\r\\n', b'\\n', b''):\n break\n\n def _send_request(self, method, url, body, headers, *py36_up_extra):\n self._response_received = False\n if headers.get('Expect', b'') == b'100-continue':\n self._expect_header_set = True\n else:\n self._expect_header_set = False\n self.response_class = self._original_response_cls\n rval = HTTPConnection._send_request(\n self, method, url, body, headers, *py36_up_extra)\n self._expect_header_set = False\n return rval\n\n def _convert_to_bytes(self, mixed_buffer):\n # Take a list of mixed str/bytes and convert it\n # all into a single bytestring.\n # Any six.text_types will be encoded as utf-8.\n bytes_buffer = []\n for chunk in mixed_buffer:\n if isinstance(chunk, six.text_type):\n bytes_buffer.append(chunk.encode('utf-8'))\n else:\n bytes_buffer.append(chunk)\n msg = b\"\\r\\n\".join(bytes_buffer)\n return msg\n\n def _send_output(self, message_body=None, **py36_up_extra):\n self._buffer.extend((b\"\", b\"\"))\n msg = self._convert_to_bytes(self._buffer)\n del self._buffer[:]\n # If msg and message_body are sent in a single send() call,\n # it will avoid performance problems caused by the interaction\n # between delayed ack and the Nagle algorithm.\n if isinstance(message_body, bytes):\n msg += message_body\n message_body = None\n self.send(msg)\n if self._expect_header_set:\n # This is our custom behavior. 
If the Expect header was\n # set, it will trigger this custom behavior.\n logger.debug(\"Waiting for 100 Continue response.\")\n # Wait for 1 second for the server to send a response.\n read, write, exc = select.select([self.sock], [], [self.sock], 1)\n if read:\n self._handle_expect_response(message_body)\n return\n else:\n # From the RFC:\n # Because of the presence of older implementations, the\n # protocol allows ambiguous situations in which a client may\n # send \"Expect: 100-continue\" without receiving either a 417\n # (Expectation Failed) status or a 100 (Continue) status.\n # Therefore, when a client sends this header field to an origin\n # server (possibly via a proxy) from which it has never seen a\n # 100 (Continue) status, the client SHOULD NOT wait for an\n # indefinite period before sending the request body.\n logger.debug(\"No response seen from server, continuing to \"\n \"send the response body.\")\n if message_body is not None:\n # message_body was not a string (i.e. it is a file), and\n # we must run the risk of Nagle.\n self.send(message_body)\n\n def _consume_headers(self, fp):\n # Most servers (including S3) will just return\n # the CLRF after the 100 continue response. However,\n # some servers (I've specifically seen this for squid when\n # used as a straight HTTP proxy) will also inject a\n # Connection: keep-alive header. To account for this\n # we'll read until we read '\\r\\n', and ignore any headers\n # that come immediately after the 100 continue response.\n current = None\n while current != b'\\r\\n':\n current = fp.readline()\n\n def _handle_expect_response(self, message_body):\n # This is called when we sent the request headers containing\n # an Expect: 100-continue header and received a response.\n # We now need to figure out what to do.\n fp = self.sock.makefile('rb', 0)\n try:\n maybe_status_line = fp.readline()\n parts = maybe_status_line.split(None, 2)\n if self._is_100_continue_status(maybe_status_line):\n self._consume_headers(fp)\n logger.debug(\"100 Continue response seen, \"\n \"now sending request body.\")\n self._send_message_body(message_body)\n elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):\n # From the RFC:\n # Requirements for HTTP/1.1 origin servers:\n #\n # - Upon receiving a request which includes an Expect\n # request-header field with the \"100-continue\"\n # expectation, an origin server MUST either respond with\n # 100 (Continue) status and continue to read from the\n # input stream, or respond with a final status code.\n #\n # So if we don't get a 100 Continue response, then\n # whatever the server has sent back is the final response\n # and don't send the message_body.\n logger.debug(\"Received a non 100 Continue response \"\n \"from the server, NOT sending request body.\")\n status_tuple = (parts[0].decode('ascii'),\n int(parts[1]), parts[2].decode('ascii'))\n response_class = functools.partial(\n KSHTTPResponse, status_tuple=status_tuple)\n self.response_class = response_class\n self._response_received = True\n finally:\n fp.close()\n\n def _send_message_body(self, message_body):\n if message_body is not None:\n self.send(message_body)\n\n def send(self, str):\n if self._response_received:\n logger.debug(\"send() called, but reseponse already received. 
\"\n \"Not sending data.\")\n return\n return HTTPConnection.send(self, str)\n\n def _is_100_continue_status(self, maybe_status_line):\n parts = maybe_status_line.split(None, 2)\n # Check for HTTP/ 100 Continue\\r\\n\n return (\n len(parts) >= 3 and parts[0].startswith(b'HTTP/') and\n parts[1] == b'100')\n\n\nclass KSHTTPSConnection(VerifiedHTTPSConnection):\n pass\n\n\n# Now we need to set the methods we overrode from KSHTTPConnection\n# onto KSHTTPSConnection. This is just a shortcut to avoid\n# copy/pasting the same code into KSHTTPSConnection.\nfor name, function in KSHTTPConnection.__dict__.items():\n if inspect.isfunction(function):\n setattr(KSHTTPSConnection, name, function)\n\n\ndef prepare_request_dict(request_dict, endpoint_url, user_agent=None):\n \"\"\"\n This method prepares a request dict to be created into an\n KSRequestObject. This prepares the request dict by adding the\n url and the user agent to the request dict.\n\n :type request_dict: dict\n :param request_dict: The request dict (created from the\n ``serialize`` module).\n\n :type user_agent: string\n :param user_agent: The user agent to use for this request.\n\n :type endpoint_url: string\n :param endpoint_url: The full endpoint url, which contains at least\n the scheme, the hostname, and optionally any path components.\n \"\"\"\n r = request_dict\n if user_agent is not None:\n headers = r['headers']\n headers['User-Agent'] = user_agent\n url = _urljoin(endpoint_url, r['url_path'])\n if r['query_string']:\n encoded_query_string = percent_encode_sequence(r['query_string'])\n if '?' not in url:\n url += '?%s' % encoded_query_string\n else:\n url += '&%s' % encoded_query_string\n r['url'] = url\n\n\ndef create_request_object(request_dict):\n \"\"\"\n This method takes a request dict and creates an KSRequest object\n from it.\n\n :type request_dict: dict\n :param request_dict: The request dict (created from the\n ``prepare_request_dict`` method).\n\n :rtype: ``kscore.ksrequest.KSRequest``\n :return: An KSRequest object based on the request_dict.\n\n \"\"\"\n r = request_dict\n return KSRequest(method=r['method'], url=r['url'],\n data=r['body'],\n headers=r['headers'])\n\n\ndef _urljoin(endpoint_url, url_path):\n p = urlsplit(endpoint_url)\n # - \n # scheme - p[0]\n # netloc - p[1]\n # path - p[2]\n # query - p[3]\n # fragment - p[4]\n if not url_path or url_path == '/':\n # If there's no path component, ensure the URL ends with\n # a '/' for backwards compatibility.\n if not p[2]:\n return endpoint_url + '/'\n return endpoint_url\n if p[2].endswith('/') and url_path.startswith('/'):\n new_path = p[2][:-1] + url_path\n else:\n new_path = p[2] + url_path\n reconstructed = urlunsplit((p[0], p[1], new_path, p[3], p[4]))\n return reconstructed\n\n\nclass KSRequest(models.RequestEncodingMixin, models.Request):\n def __init__(self, *args, **kwargs):\n self.auth_path = None\n if 'auth_path' in kwargs:\n self.auth_path = kwargs['auth_path']\n del kwargs['auth_path']\n models.Request.__init__(self, *args, **kwargs)\n headers = HTTPHeaders()\n if self.headers is not None:\n for key, value in self.headers.items():\n headers[key] = value\n self.headers = headers\n # This is a dictionary to hold information that is used when\n # processing the request. What is inside of ``context`` is open-ended.\n # For example, it may have a timestamp key that is used for holding\n # what the timestamp is when signing the request. 
Note that none\n # of the information that is inside of ``context`` is directly\n # sent over the wire; the information is only used to assist in\n # creating what is sent over the wire.\n self.context = {}\n\n def prepare(self):\n \"\"\"Constructs a :class:`KSPreparedRequest `.\"\"\"\n # Eventually I think it would be nice to add hooks into this process.\n p = KSPreparedRequest(self)\n p.prepare_method(self.method)\n p.prepare_url(self.url, self.params)\n p.prepare_headers(self.headers)\n p.prepare_cookies(self.cookies)\n p.prepare_body(self.data, self.files)\n p.prepare_auth(self.auth)\n return p\n\n @property\n def body(self):\n p = models.PreparedRequest()\n p.prepare_headers({})\n p.prepare_body(self.data, self.files)\n if isinstance(p.body, six.text_type):\n p.body = p.body.encode('utf-8')\n return p.body\n\n\nclass KSPreparedRequest(models.PreparedRequest):\n \"\"\"Represents a prepared request.\n\n :ivar method: HTTP Method\n :ivar url: The full url\n :ivar headers: The HTTP headers to send.\n :ivar body: The HTTP body.\n :ivar hooks: The set of callback hooks.\n\n In addition to the above attributes, the following attributes are\n available:\n\n :ivar query_params: The original query parameters.\n :ivar post_param: The original POST params (dict).\n\n \"\"\"\n def __init__(self, original_request):\n self.original = original_request\n super(KSPreparedRequest, self).__init__()\n self.hooks.setdefault('response', []).append(\n self.reset_stream_on_redirect)\n\n def reset_stream_on_redirect(self, response, **kwargs):\n if response.status_code in REDIRECT_STATI and \\\n self._looks_like_file(self.body):\n logger.debug(\"Redirect received, rewinding stream: %s\", self.body)\n self.reset_stream()\n\n def _looks_like_file(self, body):\n return hasattr(body, 'read') and hasattr(body, 'seek')\n\n def reset_stream(self):\n # Trying to reset a stream when there is a no stream will\n # just immediately return. It's not an error, it will produce\n # the same result as if we had actually reset the stream (we'll send\n # the entire body contents again if we need to).\n # Same case if the body is a string/bytes type.\n if self.body is None or isinstance(self.body, six.text_type) or \\\n isinstance(self.body, six.binary_type):\n return\n try:\n logger.debug(\"Rewinding stream: %s\", self.body)\n self.body.seek(0)\n except Exception as e:\n logger.debug(\"Unable to rewind stream: %s\", e)\n raise UnseekableStreamError(stream_object=self.body)\n\n def prepare_body(self, data, files, json=None):\n \"\"\"Prepares the given HTTP body data.\"\"\"\n super(KSPreparedRequest, self).prepare_body(data, files, json)\n\n # Calculate the Content-Length by trying to seek the file as\n # requests cannot determine content length for some seekable file-like\n # objects.\n if 'Content-Length' not in self.headers:\n if hasattr(data, 'seek') and hasattr(data, 'tell'):\n orig_pos = data.tell()\n data.seek(0, 2)\n end_file_pos = data.tell()\n self.headers['Content-Length'] = str(end_file_pos - orig_pos)\n data.seek(orig_pos)\n # If the Content-Length was added this way, a\n # Transfer-Encoding was added by requests because it did\n # not add a Content-Length header. 
HTTPSConnectionPool.ConnectionCls = KSHTTPSConnection\nHTTPConnectionPool.ConnectionCls = KSHTTPConnection\n\n\nlogger = logging.getLogger(__name__)\n\n\nEMPTY_SHA256_HASH = (\n 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')\n# This is the buffer size used when calculating sha256 checksums.\n# Experimenting with various buffer sizes showed that this value generally\n# gave the best result (in terms of performance).\nPAYLOAD_BUFFER = 1024 * 1024\nISO8601 = '%Y-%m-%dT%H:%M:%SZ'\nSIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'\nSIGNED_HEADERS_BLACKLIST = [\n 'expect',\n 'user-agent'\n]\n\n\nclass BaseSigner(object):\n REQUIRES_REGION = False\n\n def add_auth(self, request):\n raise NotImplementedError(\"add_auth\")\n\n\nReadOnlyCredentials = namedtuple('ReadOnlyCredentials',\n ['access_key', 'secret_key', 'token'])\n\n\nclass Credentials(object):\n \"\"\"\n Holds the credentials needed to authenticate requests.\n\n :ivar access_key: The access key part of the credentials.\n :ivar secret_key: The secret key part of the credentials.\n :ivar token: The security token, valid only for session credentials.\n :ivar method: A string which identifies where the credentials\n were found.\n \"\"\"\n\n def __init__(self, access_key, secret_key, token=None,\n method=None):\n self.access_key = access_key\n self.secret_key = secret_key\n self.token = token\n\n if method is None:\n method = 'explicit'\n self.method = method\n\n self._normalize()\n\n def _normalize(self):\n # Keys would sometimes (accidentally) contain non-ascii characters.\n # It would cause a confusing UnicodeDecodeError in Python 2.\n # We explicitly convert them into unicode to avoid such error.\n #\n # Eventually the service will decide whether to accept the credential.\n # This also complies with the behavior in Python 3.\n if isinstance(self.access_key, six.binary_type):\n self.access_key = self.access_key.decode('utf-8')\n if isinstance(self.secret_key, six.binary_type):\n self.secret_key = self.secret_key.decode('utf-8')\n\n def get_frozen_credentials(self):\n return ReadOnlyCredentials(self.access_key,\n self.secret_key,\n self.token)\n\n\nclass SigV4Auth(BaseSigner):\n \"\"\"\n Sign a request with Signature V4.\n \"\"\"\n REQUIRES_REGION = True\n\n def __init__(self, credentials, service_name, region_name):\n self.credentials = credentials\n # We initialize these values here so the unit tests can have\n # valid values. But these will get overridden in ``add_auth``\n # later for real requests.\n self._region_name = region_name\n self._service_name = service_name\n\n def _sign(self, key, msg, hex=False):\n if hex:\n sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()\n else:\n sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()\n return sig\n\n def headers_to_sign(self, request):\n \"\"\"\n Select the headers from the request that need to be included\n in the StringToSign.\n \"\"\"\n header_map = HTTPHeaders()\n split = urlsplit(request.url)\n for name, value in request.headers.items():\n lname = name.lower()\n if lname not in SIGNED_HEADERS_BLACKLIST:\n header_map[lname] = value\n if 'host' not in header_map:\n header_map['host'] = split.netloc\n return header_map\n\n def canonical_query_string(self, request):\n # The query string can come from two parts. One is the\n # params attribute of the request. 
The other is from the request\n # url (in which case we have to re-split the url into its components\n # and parse out the query string component).\n if request.params:\n return self._canonical_query_string_params(request.params)\n else:\n return self._canonical_query_string_url(urlsplit(request.url))\n\n def _canonical_query_string_params(self, params):\n l = []\n for param in sorted(params):\n value = str(params[param])\n l.append('%s=%s' % (quote(param, safe='-_.~'),\n quote(value, safe='-_.~')))\n cqs = '&'.join(l)\n return cqs\n\n def _canonical_query_string_url(self, parts):\n canonical_query_string = ''\n if parts.query:\n # [(key, value), (key2, value2)]\n key_val_pairs = []\n for pair in parts.query.split('&'):\n key, _, value = pair.partition('=')\n key_val_pairs.append((key, value))\n sorted_key_vals = []\n # Sort by the key names, and in the case of\n # repeated keys, then sort by the value.\n for key, value in sorted(key_val_pairs):\n sorted_key_vals.append('%s=%s' % (key, value))\n canonical_query_string = '&'.join(sorted_key_vals)\n return canonical_query_string\n\n def canonical_headers(self, headers_to_sign):\n \"\"\"\n Return the headers that need to be included in the StringToSign\n in their canonical form by converting all header keys to lower\n case, sorting them in alphabetical order and then joining\n them into a string, separated by newlines.\n \"\"\"\n headers = []\n sorted_header_names = sorted(set(headers_to_sign))\n for key in sorted_header_names:\n value = ','.join(v.strip() for v in\n sorted(headers_to_sign.get_all(key)))\n headers.append('%s:%s' % (key, value))\n return '\\n'.join(headers)\n\n def signed_headers(self, headers_to_sign):\n l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]\n l = sorted(l)\n return ';'.join(l)\n\n def payload(self, request):\n if request.body and hasattr(request.body, 'seek'):\n position = request.body.tell()\n read_chunksize = functools.partial(request.body.read,\n PAYLOAD_BUFFER)\n checksum = sha256()\n for chunk in iter(read_chunksize, b''):\n checksum.update(chunk)\n hex_checksum = checksum.hexdigest()\n request.body.seek(position)\n return hex_checksum\n elif request.body:\n # The request serialization has ensured that\n # request.body is a bytes() type.\n return sha256(request.body).hexdigest()\n else:\n return EMPTY_SHA256_HASH\n\n def canonical_request(self, request):\n cr = [request.method.upper()]\n path = self._normalize_url_path(urlsplit(request.url).path)\n cr.append(path)\n cr.append(self.canonical_query_string(request))\n headers_to_sign = self.headers_to_sign(request)\n cr.append(self.canonical_headers(headers_to_sign) + '\\n')\n cr.append(self.signed_headers(headers_to_sign))\n if 'X-Amz-Content-SHA256' in request.headers:\n body_checksum = request.headers['X-Amz-Content-SHA256']\n else:\n body_checksum = self.payload(request)\n cr.append(body_checksum)\n return '\\n'.join(cr)\n\n def _normalize_url_path(self, path):\n normalized_path = quote(normalize_url_path(path), safe='/~')\n return normalized_path\n\n def scope(self, request):\n scope = [self.credentials.access_key]\n scope.append(request.context['timestamp'][0:8])\n scope.append(self._region_name)\n scope.append(self._service_name)\n scope.append('aws4_request')\n return '/'.join(scope)\n\n def credential_scope(self, request):\n scope = []\n scope.append(request.context['timestamp'][0:8])\n scope.append(self._region_name)\n scope.append(self._service_name)\n scope.append('aws4_request')\n return '/'.join(scope)\n\n def string_to_sign(self, 
request, canonical_request):\n \"\"\"\n Return the canonical StringToSign for the given request.\n \"\"\"\n sts = ['AWS4-HMAC-SHA256']\n sts.append(request.context['timestamp'])\n sts.append(self.credential_scope(request))\n sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())\n return '\\n'.join(sts)\n\n def signature(self, string_to_sign, request):\n key = self.credentials.secret_key\n k_date = self._sign(('AWS4' + key).encode('utf-8'),\n request.context['timestamp'][0:8])\n k_region = self._sign(k_date, self._region_name)\n k_service = self._sign(k_region, self._service_name)\n k_signing = self._sign(k_service, 'aws4_request')\n return self._sign(k_signing, string_to_sign, hex=True)\n\n def add_auth(self, request):\n if self.credentials is None:\n raise NoCredentialsError\n datetime_now = datetime.datetime.utcnow()\n request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)\n # This could be a retry. Make sure the previous\n # authorization header is removed first.\n self._modify_request_before_signing(request)\n canonical_request = self.canonical_request(request)\n logger.debug(\"Calculating signature using v4 auth.\")\n logger.debug('CanonicalRequest:\\n%s', canonical_request)\n string_to_sign = self.string_to_sign(request, canonical_request)\n logger.debug('StringToSign:\\n%s', string_to_sign)\n signature = self.signature(string_to_sign, request)\n logger.debug('Signature:\\n%s', signature)\n\n self._inject_signature_to_request(request, signature)\n\n def _inject_signature_to_request(self, request, signature):\n l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]\n headers_to_sign = self.headers_to_sign(request)\n l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))\n l.append('Signature=%s' % signature)\n request.headers['Authorization'] = ', '.join(l)\n return request\n\n def _modify_request_before_signing(self, request):\n if 'Authorization' in request.headers:\n del request.headers['Authorization']\n self._set_necessary_date_headers(request)\n if self.credentials.token:\n if 'X-Amz-Security-Token' in request.headers:\n del request.headers['X-Amz-Security-Token']\n request.headers['X-Amz-Security-Token'] = self.credentials.token\n\n def _set_necessary_date_headers(self, request):\n # The spec allows for either the Date _or_ the X-Amz-Date value to be\n # used so we check both. If there's a Date header, we use the date\n # header. 
Otherwise we use the X-Amz-Date header.\n if 'Date' in request.headers:\n del request.headers['Date']\n datetime_timestamp = datetime.datetime.strptime(\n request.context['timestamp'], SIGV4_TIMESTAMP)\n request.headers['Date'] = formatdate(\n int(calendar.timegm(datetime_timestamp.timetuple())))\n if 'X-Amz-Date' in request.headers:\n del request.headers['X-Amz-Date']\n else:\n if 'X-Amz-Date' in request.headers:\n del request.headers['X-Amz-Date']\n request.headers['X-Amz-Date'] = request.context['timestamp']\n\n\nclass S3SigV4Auth(SigV4Auth):\n\n def _modify_request_before_signing(self, request):\n super(S3SigV4Auth, self)._modify_request_before_signing(request)\n if 'X-Amz-Content-SHA256' in request.headers:\n del request.headers['X-Amz-Content-SHA256']\n request.headers['X-Amz-Content-SHA256'] = self.payload(request)\n\n def _normalize_url_path(self, path):\n # For S3, we do not normalize the path.\n return path\n\n\nclass SigV4QueryAuth(SigV4Auth):\n DEFAULT_EXPIRES = 3600\n\n def __init__(self, credentials, service_name, region_name,\n expires=DEFAULT_EXPIRES):\n super(SigV4QueryAuth, self).__init__(credentials, service_name,\n region_name)\n self._expires = expires\n\n def _modify_request_before_signing(self, request):\n # Note that we're not including X-Amz-Signature.\n # From the docs: \"The Canonical Query String must include all the query\n # parameters from the preceding table except for X-Amz-Signature.\n signed_headers = self.signed_headers(self.headers_to_sign(request))\n auth_params = {\n 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',\n 'X-Amz-Credential': self.scope(request),\n 'X-Amz-Date': request.context['timestamp'],\n 'X-Amz-Expires': self._expires,\n 'X-Amz-SignedHeaders': signed_headers,\n }\n if self.credentials.token is not None:\n auth_params['X-Amz-Security-Token'] = self.credentials.token\n # Now parse the original query string to a dict, inject our new query\n # params, and serialize back to a query string.\n url_parts = urlsplit(request.url)\n # parse_qs makes each value a list, but in our case we know we won't\n # have repeated keys so we know we have single element lists which we\n # can convert back to scalar values.\n query_dict = dict(\n [(k, v[0]) for k, v in parse_qs(url_parts.query).items()])\n # The spec is particular about this. 
It *has* to be:\n # https://<endpoint>?<operation params>&<auth params>\n # You can't mix the two types of params together, i.e. just keep doing\n # new_query_params.update(op_params)\n # new_query_params.update(auth_params)\n # percent_encode_sequence(new_query_params)\n operation_params = ''\n if request.data:\n # We also need to move the body params into the query string.\n # request.data will be populated, for example, with query services\n # which normally form encode the params into the body.\n # This means that request.data is a dict() of the operation params.\n query_dict.update(request.data)\n request.data = ''\n if query_dict:\n operation_params = percent_encode_sequence(query_dict) + '&'\n new_query_string = (operation_params +\n percent_encode_sequence(auth_params))\n # url_parts is a tuple (and therefore immutable) so we need to create\n # a new url_parts with the new query string.\n # <part> - <index>\n # scheme - 0\n # netloc - 1\n # path - 2\n # query - 3 <-- we're replacing this.\n # fragment - 4\n p = url_parts\n new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])\n request.url = urlunsplit(new_url_parts)\n\n def _inject_signature_to_request(self, request, signature):\n # Rather than calculating an \"Authorization\" header, for the query\n # param auth, we just append an 'X-Amz-Signature' param to the end\n # of the query string.\n request.url += '&X-Amz-Signature=%s' % signature\n\n\nclass S3SigV4QueryAuth(SigV4QueryAuth):\n \"\"\"S3 SigV4 auth using query parameters.\n\n This signer will sign a request using query parameters and signature\n version 4, i.e. a \"presigned url\" signer.\n\n Based off of:\n\n http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html\n\n \"\"\"\n def _normalize_url_path(self, path):\n # For S3, we do not normalize the path.\n return path\n\n def payload(self, request):\n # From the doc link above:\n # \"You don't include a payload hash in the Canonical Request, because\n # when you create a presigned URL, you don't know anything about the\n # payload. Instead, you use a constant string 'UNSIGNED-PAYLOAD'.\"\n return \"UNSIGNED-PAYLOAD\"\n\n
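# The presigned-POST signer below signs a base64-encoded policy document\n# rather than a canonical request; every x-amz-* field that the HTML form\n# will submit is repeated in the policy's conditions list.\n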
class S3SigV4PostAuth(SigV4Auth):\n \"\"\"\n Presigns an S3 POST request.\n\n \"\"\"\n def add_auth(self, request):\n datetime_now = datetime.datetime.utcnow()\n request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)\n\n fields = {}\n if request.context.get('s3-presign-post-fields', None) is not None:\n fields = request.context['s3-presign-post-fields']\n\n policy = {}\n conditions = []\n if request.context.get('s3-presign-post-policy', None) is not None:\n policy = request.context['s3-presign-post-policy']\n if policy.get('conditions', None) is not None:\n conditions = policy['conditions']\n\n policy['conditions'] = conditions\n\n fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'\n fields['x-amz-credential'] = self.scope(request)\n fields['x-amz-date'] = request.context['timestamp']\n\n conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})\n conditions.append({'x-amz-credential': self.scope(request)})\n conditions.append({'x-amz-date': request.context['timestamp']})\n\n if self.credentials.token is not None:\n fields['x-amz-security-token'] = self.credentials.token\n conditions.append({'x-amz-security-token': self.credentials.token})\n\n # Dump the base64 encoded policy into the fields dictionary.\n fields['policy'] = base64.b64encode(\n json.dumps(policy).encode('utf-8')).decode('utf-8')\n\n fields['x-amz-signature'] = self.signature(fields['policy'], request)\n\n request.context['s3-presign-post-fields'] = fields\n request.context['s3-presign-post-policy'] = policy\n\n\nAUTH_TYPE_MAPS = {\n 'v4': SigV4Auth,\n 'v4-query': SigV4QueryAuth,\n 's3v4': S3SigV4Auth,\n 's3v4-query': S3SigV4QueryAuth,\n 's3v4-presign-post': S3SigV4PostAuth,\n\n}\n\n\n\nif __name__ == '__main__':\n access_key = \"AKLTJZEjW05lQEGx1Z_g07AazA\"\n secret_key = \"OAcfe1+lkHucQoaVMUbIhlaDK2D8QuFMv4jHiRRtgtNqYVaEOWLv3MaRZAlk565hRg==\"\n credentials = Credentials(access_key, secret_key)\n v4 = SigV4Auth(credentials, \"iam\", \"cn-beijing-6\")\n request_dict = {'context': '', 'url': 'http://10.100.50.90', 'headers': {}, 'method': 'GET', 'params': '', 'body': ''}\n request = create_request_object(request_dict)\n print(v4.add_auth(request))\n", "sub_path": "Tools/AWSTEST.py", "file_name": "AWSTEST.py", "file_ext": "py", "file_size_in_byte": 37220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 58, "usage_type": "call"}, {"api_name": "kscore.compat.HTTPResponse", "line_number": 61, "usage_type": "name"}, {"api_name": "kscore.compat.HTTPResponse.__init__", "line_number": 66, "usage_type": "call"}, {"api_name": "kscore.compat.HTTPResponse", "line_number": 66, "usage_type": "name"}, {"api_name": "kscore.compat.HTTPResponse._read_status", "line_number": 74, "usage_type": "call"}, {"api_name": "kscore.compat.HTTPResponse", "line_number": 74, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 77, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection.__init__", "line_number": 90, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 90, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection.close", "line_number": 104, "usage_type": "call"}, {"api_name": 
"kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 104, "usage_type": "name"}, {"api_name": "sys.version_info", "line_number": 118, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection._tunnel", "line_number": 119, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 119, "usage_type": "name"}, {"api_name": "socket.error", "line_number": 133, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection._send_request", "line_number": 149, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 149, "usage_type": "name"}, {"api_name": "kscore.compat.six.text_type", "line_number": 160, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 160, "usage_type": "name"}, {"api_name": "select.select", "line_number": 183, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 246, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection.send", "line_number": 262, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.HTTPConnection", "line_number": 262, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connection.VerifiedHTTPSConnection", "line_number": 272, "usage_type": "name"}, {"api_name": "inspect.isfunction", "line_number": 280, "usage_type": "call"}, {"api_name": "kscore.utils.percent_encode_sequence", "line_number": 307, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 335, "usage_type": "call"}, {"api_name": "kscore.compat.urlunsplit", "line_number": 352, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models.RequestEncodingMixin", "line_number": 356, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models", "line_number": 356, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.models.Request", "line_number": 356, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models.Request.__init__", "line_number": 362, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models.Request", "line_number": 362, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models", "line_number": 362, "usage_type": "name"}, {"api_name": "kscore.compat.HTTPHeaders", "line_number": 363, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models.PreparedRequest", "line_number": 391, "usage_type": "call"}, {"api_name": "kscore.vendored.requests.models", "line_number": 391, "usage_type": "name"}, {"api_name": "kscore.compat.six.text_type", "line_number": 394, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 394, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.models.PreparedRequest", "line_number": 399, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.models", "line_number": 399, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.sessions.REDIRECT_STATI", "line_number": 422, "usage_type": "name"}, {"api_name": "kscore.compat.six.text_type", "line_number": 436, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 436, "usage_type": "name"}, {"api_name": "kscore.compat.six.binary_type", "line_number": 437, "usage_type": "attribute"}, {"api_name": "kscore.compat.six", "line_number": 437, 
"usage_type": "name"}, {"api_name": "kscore.exceptions.UnseekableStreamError", "line_number": 444, "usage_type": "call"}, {"api_name": "kscore.compat.json", "line_number": 448, "usage_type": "argument"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPSConnectionPool.ConnectionCls", "line_number": 468, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPSConnectionPool", "line_number": 468, "usage_type": "name"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPConnectionPool.ConnectionCls", "line_number": 469, "usage_type": "attribute"}, {"api_name": "kscore.vendored.requests.packages.urllib3.connectionpool.HTTPConnectionPool", "line_number": 469, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 472, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 496, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 556, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 556, "usage_type": "argument"}, {"api_name": "hmac.new", "line_number": 558, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 558, "usage_type": "argument"}, {"api_name": "kscore.compat.HTTPHeaders", "line_number": 566, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 567, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 584, "usage_type": "call"}, {"api_name": "kscore.compat.quote", "line_number": 590, "usage_type": "call"}, {"api_name": "kscore.compat.quote", "line_number": 591, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 634, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 636, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 645, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 651, "usage_type": "call"}, {"api_name": "kscore.compat.quote", "line_number": 665, "usage_type": "call"}, {"api_name": "kscore.utils.normalize_url_path", "line_number": 665, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 693, "usage_type": "call"}, {"api_name": "kscore.exceptions.NoCredentialsError", "line_number": 707, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 708, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 708, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 746, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 746, "usage_type": "attribute"}, {"api_name": "email.utils.formatdate", "line_number": 748, "usage_type": "call"}, {"api_name": "calendar.timegm", "line_number": 749, "usage_type": "call"}, {"api_name": "kscore.compat.urlsplit", "line_number": 796, "usage_type": "call"}, {"api_name": "kscore.compat.parse_qs", "line_number": 801, "usage_type": "call"}, {"api_name": "kscore.utils.percent_encode_sequence", "line_number": 817, "usage_type": "call"}, {"api_name": "kscore.utils.percent_encode_sequence", "line_number": 819, "usage_type": "call"}, {"api_name": "kscore.compat.urlunsplit", "line_number": 830, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 865, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 865, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 894, "usage_type": "call"}, {"api_name": "kscore.compat.json.dumps", "line_number": 895, "usage_type": "call"}, 
{"api_name": "kscore.compat.json", "line_number": 895, "usage_type": "name"}]}
+{"seq_id": "131248341", "text": "#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n\n# !/usr/bin/env python\n\n# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg\nfrom model.test import im_detect\nfrom model.test import im_detect_feat\n\nfrom layer_utils.roi_layers import nms\n\nfrom utils.timer import Timer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, cv2\nimport argparse\nimport json\n\nfrom nets.vgg16 import vgg16\nfrom nets.resnet_v1 import resnetv1, resnet101\nfrom multiprocessing import Process\n\nimport torch\n\nimport pdb\n\nCLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',\n 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',\n 'tvmonitor')\n\nNETS = {\n 'vgg16': ('vgg16_faster_rcnn_iter_%d.pth',),\n 'res101': ('res101_faster_rcnn_iter_%d.pth',)\n}\nDATASETS = {\n 'pascal_voc': ('voc_2007_trainval',),\n 'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)\n}\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\n\ndef vis_detections(im, class_name, dets, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1],\n fill=False,\n edgecolor='red',\n linewidth=3.5))\n ax.text(\n bbox[0],\n bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14,\n color='white')\n\n ax.set_title(\n ('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name, thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n\n\ndef demo(net, image_name):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(\n timer.total_time(), boxes.shape[0]))\n\n # Visualize detections for each class\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(\n torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),\n NMS_THRESH)\n dets = dets[keep.numpy(), :]\n vis_detections(im, cls, dets, thresh=CONF_THRESH)\n\n\ndef parse_args():\n \"\"\"Parse input arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description='Tensorflow Faster R-CNN demo')\n parser.add_argument(\n '--net',\n dest='demo_net',\n help='Network to use [vgg16 res101]',\n choices=NETS.keys(),\n default='res101')\n parser.add_argument(\n 
'--dataset',\n dest='dataset',\n help='Trained dataset [pascal_voc pascal_voc_0712]',\n choices=DATASETS.keys(),\n default='pascal_voc_0712')\n args = parser.parse_args()\n\n return args\n\n\ndef load_image_ids(split_name):\n ''' Load a list of (path, image_id) tuples. Modify this to suit your data locations. '''\n split = []\n base_dir = '/DATA/disk1/zhangming6/Datasets/AI_Challenger_2017/caption/raw_data/train_20170902'\n\n if split_name == 'coco_test2014':\n with open('/data/coco/annotations/image_info_test2014.json') as f:\n data = json.load(f)\n for item in data['images']:\n image_id = int(item['id'])\n filepath = os.path.join('/data/test2014/', item['file_name'])\n split.append((filepath, image_id))\n elif split_name == 'coco_test2015':\n with open('/data/coco/annotations/image_info_test2015.json') as f:\n data = json.load(f)\n for item in data['images']:\n image_id = int(item['id'])\n filepath = os.path.join('/data/test2015/', item['file_name'])\n split.append((filepath, image_id))\n elif split_name == 'genome':\n with open('/data/visualgenome/image_data.json') as f:\n for item in json.load(f):\n image_id = int(item['image_id'])\n filepath = os.path.join('/data/visualgenome/', item['url'].split('rak248/')[-1])\n split.append((filepath, image_id))\n elif split_name == 'chinese_train':\n with open(base_dir + '/caption_train_annotations_20170902.json') as f:\n for item in json.load(f):\n image_id = item['image_id']\n filepath = os.path.join(base_dir + '/caption_train_images_20170902', image_id)\n split.append((filepath, image_id))\n elif split_name == 'chinese_val':\n with open(base_dir + '/caption_validation_annotations_20170910.json') as f:\n for item in json.load(f):\n image_id = item['image_id']\n filepath = os.path.join(base_dir + '/caption_validation_images_20170910', image_id)\n split.append((filepath, image_id))\n elif split_name == 'chinese_test1':\n with open(base_dir + '/caption_test1_annotations_20170923.json') as f:\n for item in json.load(f):\n image_id = item['image_id']\n filepath = os.path.join(base_dir + '/caption_test1_images_20170923', image_id)\n split.append((filepath, image_id))\n else:\n print('Unknown split')\n return split\n\n\ndef feature_gen(net, image_name):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n im = cv2.imread(im_file)\n\n scores, boxes, pool5 = im_detect(net, im)\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(\n torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),\n NMS_THRESH)\n dets = dets[keep.numpy(), :]\n pool5_select = pool5[keep.numpy(), :]\n # path = os.path.abspath(os.path.dirname(__file__)+'/../data/test/')\n path = 'demo_res/'\n np.save(path + 'fc.npy', pool5_select.mean(0))\n np.savez_compressed(path + 'att.npz', feat=pool5_select)\n np.save(path + 'box.npy', dets)\n\n print('Done!')\n\n\ndef feature_gen_multi(net, image_list, outpath):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n count = 0\n sum = len(image_list)\n for img_file, img_id in image_list:\n im_file = os.path.join(img_file)\n im = cv2.imread(im_file)\n\n scores, boxes, pool5 = im_detect(net, im)\n\n CONF_THRESH = 0.8\n NMS_THRESH = 0.3\n 
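# Per-class NMS: for each foreground class, drop detections that overlap a\n # higher-scoring box of the same class by more than NMS_THRESH.\n 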
for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(\n torch.from_numpy(cls_boxes), torch.from_numpy(cls_scores),\n NMS_THRESH)\n dets = dets[keep.numpy(), :]\n pool5_select = pool5[keep.numpy(), :]\n\n np.save(outpath + 'chinese_bu_fc/' + img_id + '.npy', pool5_select.mean(0))\n np.savez_compressed(outpath + 'chinese_bu_att/' + img_id + '.npz', feat=pool5_select)\n np.save(outpath + 'chinese_bu_box/' + img_id + '.npy', dets)\n\n count += 1\n if count % 100 == 0:\n print('{}/{}:{:.2f}%'.format(count, sum, (count / sum) * 100))\n\n print('Done!')\n\n\ndef single_img(net):\n im_names = [\n 'a2af7deaa01abca741477820bbf37b340df02a88.jpg'\n # 'test_wave.jpg'\n ]\n for im_name in im_names:\n print('*' * 26)\n print('Demo for data/demo/{}'.format(im_name))\n # demo(net, im_name)\n feature_gen(net, im_name)\n\n\ndef multi_img(net):\n split_num = 2\n image_ids = load_image_ids('chinese_train')\n # Split image ids between gpus\n image_ids_split = [image_ids[i::split_num] for i in range(split_num)]\n\n procs = []\n outfile = '/DATA/disk1/zhangming6/Datasets/AI_Challenger_2017/caption/bottom_up_zm/'\n\n multi_process = False\n if multi_process: # not usable yet\n for i in range(split_num):\n p = Process(target=feature_gen_multi,\n args=(net, image_ids_split[i], outfile))\n p.daemon = True\n p.start()\n procs.append(p)\n for p in procs:\n p.join()\n else:\n feature_gen_multi(net, image_ids, outfile)\n\n\nif __name__ == '__main__':\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n args = parse_args()\n\n # model path\n demonet = args.demo_net\n dataset = args.dataset\n saved_model = os.path.join(\n 'output', demonet, DATASETS[dataset][0], 'default',\n NETS[demonet][0] % (70000 if dataset == 'pascal_voc' else 110000))\n\n if not os.path.isfile(saved_model):\n raise IOError(\n ('{:s} not found.\\nDid you download the proper networks from '\n 'our server and place them properly?').format(saved_model))\n\n # load network\n\n if demonet == 'vgg16':\n net = vgg16()\n elif demonet == 'res101':\n net = resnetv1(num_layers=101)\n else:\n raise NotImplementedError\n net.create_architecture(21, tag='default', anchor_scales=[8, 16, 32])\n\n net.load_state_dict(\n torch.load(saved_model, map_location=lambda storage, loc: storage))\n\n # net = resnet101(True)\n\n net.eval()\n if not torch.cuda.is_available():\n net._device = 'cpu'\n net.to(net._device)\n\n print('Loaded network {:s}'.format(saved_model))\n\n # single_img(net)\n multi_img(net)\n", "sub_path": "tools/chinese_feature_gen.py", "file_name": "chinese_feature_gen.py", "file_ext": "py", "file_size_in_byte": 11110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.tight_layout", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.config.cfg.DATA_DIR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.config.cfg", "line_number": 95, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.timer.Timer", "line_number": 99, "usage_type": "call"}, {"api_name": "model.test.im_detect", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "layer_utils.roi_layers.nms", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 116, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 124, "usage_type": "call"}, {"api_name": "json.load", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "model.config.cfg.DATA_DIR", "line_number": 196, "usage_type": "attribute"}, {"api_name": "model.config.cfg", "line_number": 196, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 197, "usage_type": "call"}, {"api_name": "model.test.im_detect", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 208, "usage_type": "attribute"}, {"api_name": "layer_utils.roi_layers.nms", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", 
"line_number": 217, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 230, "usage_type": "call"}, {"api_name": "model.test.im_detect", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 241, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 241, "usage_type": "attribute"}, {"api_name": "layer_utils.roi_layers.nms", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 250, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 283, "usage_type": "call"}, {"api_name": "model.config.cfg.TEST", "line_number": 295, "usage_type": "attribute"}, {"api_name": "model.config.cfg", "line_number": 295, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 305, "usage_type": "call"}, {"api_name": "os.path", "line_number": 305, "usage_type": "attribute"}, {"api_name": "nets.vgg16.vgg16", "line_number": 313, "usage_type": "call"}, {"api_name": "nets.resnet_v1.resnetv1", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 321, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 326, "usage_type": "attribute"}]}
+{"seq_id": "337235534", "text": "# This file is part of Indico.\n# Copyright (C) 2002 - 2019 CERN\n#\n# Indico is free software; you can redistribute it and/or\n# modify it under the terms of the MIT License; see the\n# LICENSE file for more details.\n\nfrom __future__ import unicode_literals\n\nfrom flask import render_template\n\nfrom indico.core.notifications import email_sender, make_email\n\n\n@email_sender\ndef notify_amount_inconsistency(registration, amount, currency):\n event = registration.registration_form.event\n to = event.creator.email\n body = render_template('events/payment/emails/payment_inconsistency_email_to_manager.txt',\n event=event, registration=registration, amount=amount, currency=currency)\n return make_email(to, subject='Payment inconsistency', body=body)\n", "sub_path": "indico/modules/events/payment/notifications.py", "file_name": "notifications.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "indico.core.notifications.make_email", "line_number": 21, "usage_type": "call"}, {"api_name": "indico.core.notifications.email_sender", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "313463747", "text": "from sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nimport winsound as ws\nimport numpy as np\n\nfrom utils import TrainModule\n\nnp_loader = np.load(\"TFSR_80_n_mfcc.npz\")\nx_data, y_data = np.expand_dims(np_loader[\"x_norm_data\"], axis=-1), to_categorical(np_loader[\"y_data\"])\nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, shuffle=True)\n\ntm_mfcc = TrainModule(result_file_name=\"TFSR_80_n_mfcc_training_result_conv2d\",\n input_shape=np.shape(x_train)[1:],\n output_shape=np.shape(y_train)[1]\n )\n\nmodel1 = tm_mfcc.create_conv2d_model()\n\nckpt_path = CHECKPOINT_PATH\nmodel_path = MODEL_SAVE_PATH\n\ntm_mfcc.training(\n model=model1,\n x_train=x_train,\n y_train=y_train,\n ckpt_path=ckpt_path,\n model_path=model_path,\n x_test=x_test,\n y_test=y_test\n)\n\nws.Beep(2000, 1000)\n", "sub_path": "training_mfcc_2d.py", "file_name": "training_mfcc_2d.py", "file_ext": "py", "file_size_in_byte": 926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.load", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.TrainModule", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 14, "usage_type": "call"}, {"api_name": "winsound.Beep", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "88179582", "text": "import torch.utils.data\n\nfrom vision3d.datasets import ModelNet40Dataset\nimport vision3d.transforms.functional as F\nfrom vision3d.utils.pytorch_utils import reset_numpy_random_seed\n\n\nclass TrainTransform(object):\n def __init__(self, num_point, sigma, low, high):\n self.num_point = num_point\n self.sigma = sigma\n self.low = low\n self.high = high\n\n def __call__(self, points):\n points = F.sample_point_cloud(points, self.num_point)\n points = F.random_shuffle_point_cloud(points)\n points = F.random_rescale_point_cloud(points, self.low, self.high)\n points = F.random_jitter_point_cloud(points, self.sigma)\n points = points.transpose()\n points = torch.tensor(points, dtype=torch.float)\n return points\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(\\n'\n format_string += ' SamplePointCloud(num_point={})\\n'.format(self.num_point)\n format_string += ' RandomShufflePointCloud()\\n'\n format_string += ' RandomRescalePointCloud(low={}, high={})\\n'.format(self.low, self.high)\n format_string += ' RandomJitterPointCloud(sigma={})\\n'.format(self.sigma)\n format_string += ')'\n return format_string\n\n\nclass TestTransform(object):\n def __init__(self, num_point):\n self.num_point = num_point\n\n def __call__(self, points):\n points = F.sample_point_cloud(points, self.num_point)\n points = points.transpose()\n points = torch.tensor(points, dtype=torch.float)\n return points\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(\\n'\n format_string += ' SamplePointCloud(num_point={})\\n'.format(self.num_point)\n format_string += ')'\n return format_string\n\n\ndef train_data_loader(config):\n train_transform = TrainTransform(config.train_num_point,\n config.train_jitter_sigma,\n config.train_rescale_low,\n config.train_rescale_high)\n train_dataset = ModelNet40Dataset(config.data_root, 'train', train_transform)\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=config.train_batch_size,\n shuffle=True,\n num_workers=config.train_num_worker,\n pin_memory=True,\n drop_last=True,\n worker_init_fn=reset_numpy_random_seed)\n return train_loader\n\n\ndef test_data_loader(config):\n test_transform = TestTransform(config.test_num_point)\n test_dataset = ModelNet40Dataset(config.data_root, 'test', test_transform)\n test_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=config.test_batch_size,\n num_workers=config.test_num_worker,\n worker_init_fn=reset_numpy_random_seed)\n return test_loader\n\n\nif __name__ == '__main__':\n from config import config\n\n data_loader = train_data_loader(config)\n for i, (x, y) in enumerate(data_loader):\n print(i, ': ', x.shape, y.shape)\n\n data_loader = test_data_loader(config)\n for i, (x, y) in enumerate(data_loader):\n print(i, ': ', x.shape, y.shape)\n", "sub_path": "experiments/pointnet.modelnet40.resize+jitter.adam.tnet.smooth/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 3505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "vision3d.transforms.functional.sample_point_cloud", "line_number": 16, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 16, "usage_type": "name"}, {"api_name": "vision3d.transforms.functional.random_shuffle_point_cloud", "line_number": 17, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 17, "usage_type": "name"}, {"api_name": 
"vision3d.transforms.functional.random_rescale_point_cloud", "line_number": 18, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 18, "usage_type": "name"}, {"api_name": "vision3d.transforms.functional.random_jitter_point_cloud", "line_number": 19, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.utils.data.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.utils.data.float", "line_number": 21, "usage_type": "attribute"}, {"api_name": "vision3d.transforms.functional.sample_point_cloud", "line_number": 39, "usage_type": "call"}, {"api_name": "vision3d.transforms.functional", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.utils.data.tensor", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.utils.data.float", "line_number": 41, "usage_type": "attribute"}, {"api_name": "vision3d.datasets.ModelNet40Dataset", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 57, "usage_type": "name"}, {"api_name": "vision3d.utils.pytorch_utils.reset_numpy_random_seed", "line_number": 63, "usage_type": "name"}, {"api_name": "vision3d.datasets.ModelNet40Dataset", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 70, "usage_type": "name"}, {"api_name": "vision3d.utils.pytorch_utils.reset_numpy_random_seed", "line_number": 73, "usage_type": "name"}, {"api_name": "config.config", "line_number": 80, "usage_type": "argument"}, {"api_name": "config.config", "line_number": 84, "usage_type": "argument"}]}
+{"seq_id": "96949522", "text": "from django.urls import path\nfrom .views.shift_views import (\n ShiftCreateView,\n ShiftDetailView,\n ShiftUpdateView,\n ShiftListView,\n)\nfrom .views.roster_view import(\n RosterCreateView,\n)\napp_name = 'eattendance'\nurlpatterns = [\n # shift urls\n path('shift/create/', ShiftCreateView.as_view(), name='shift-create'),\n path('shift//', ShiftDetailView.as_view(), name='shift-detail'),\n path('shift//update', ShiftUpdateView.as_view(),name='shift-update'),\n path('shifts/', ShiftListView.as_view(), name='shifts'),\n # roster urls\n path('roster/create/', RosterCreateView.as_view(), name='roster-create'),\n]", "sub_path": "Swasthya/eattendance/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftCreateView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftCreateView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftDetailView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftDetailView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftUpdateView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftUpdateView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftListView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.shift_views.ShiftListView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.roster_view.RosterCreateView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.roster_view.RosterCreateView", "line_number": 19, "usage_type": "name"}]}
+{"seq_id": "230274000", "text": "import pika\n\nEXCHANGE_NAME = 'test-exchange'\nQUEUE_NAME = 'test-queue'\n\ndef init_rabbitmq():\n conn = pika.BlockingConnection()\n chan = conn.channel()\n\n chan.exchange_declare(EXCHANGE_NAME, 'direct')\n chan.queue_declare(QUEUE_NAME, durable=True)\n chan.queue_bind(QUEUE_NAME, EXCHANGE_NAME, \"routing.key\")\n\n conn.close()\n\n\nclass Producer(object):\n def __init__(self, conn):\n self.conn = conn\n\n def send_message(self, msg, exch, rtg_key):\n chan = self.conn.channel()\n chan.basic_publish(exch, rtg_key, msg)\n chan.close()\n\n\nclass Consumer(object):\n def __init__(self, conn):\n self.conn = conn\n\n def get_message(self, queue):\n chan = self.conn.channel()\n frame, _, body = chan.basic_get(queue)\n if frame:\n chan.basic_ack(frame.delivery_tag)\n return body\n\n\ndef hello_world():\n init_rabbitmq()\n\n conn = pika.BlockingConnection()\n\n p = Producer(conn)\n\n p.send_message(\"Hello world!\", EXCHANGE_NAME, 'routing.key')\n c = Consumer(conn)\n\n print(c.get_message(QUEUE_NAME))\n\n\nif __name__ == \"__main__\":\n hello_world()\n", "sub_path": "example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 1055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pika.BlockingConnection", "line_number": 7, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 42, "usage_type": "call"}]}
+{"seq_id": "643500346", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n## Import the related Libraries\nimport numpy as np\nimport statsmodels.api as sm ## OLS\nimport pandas as pd\nfrom scipy import stats\nfrom random import sample\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport sys\n\n## Load the data\nrawdata = np.genfromtxt(sys.argv[1], skip_header=1)\nX = rawdata[:, :-1]\ny = rawdata[:, -1]\n\ndef MLR(data, flag):\n X = data[:, :-1]\n y = data[:, -1]\n if flag == 1:\n ###### Ordinary least squares ######\n X2 = sm.add_constant(X) # for intercept\n est = sm.OLS(y, X2)\n est2 = est.fit()\n print(est2.summary())\n else:\n ###### Sklearn Linear regression ######\n Reg = LinearRegression()\n Reg.fit(X, y)\n params = np.append(Reg.intercept_, Reg.coef_)\n y_hat = Reg.predict(X)\n newX = np.append(np.ones((len(X), 1)), X, axis = 1)\n\n ## including intercept for matrix calculation\n MSE = (sum((y - y_hat)**2)) / (len(newX)-len(newX[0]))\n\n var_beta = MSE * (np.linalg.inv(np.dot(newX.T, newX)).diagonal())\n s_beta = np.sqrt(var_beta)\n t_beta = params / s_beta\n\n p_values = [2 * (1 - stats.t.cdf(np.abs(t), ( len(newX) - len(newX[0]) - 1))) for t in t_beta]\n\n # 반올림 작업.\n sd_b = np.round(s_beta, 3) ## Std.Errors of Coefficient\n ts_b = np.round(t_beta, 3) ## t-value\n p_values = np.round(p_values, 6) ## P-value\n params = np.round(params, 4) ## Coefficients\n\n R_squared = r2_score(y, y_hat)\n\n # Result table\n Result = pd.DataFrame()\n Result[\"Coefficients\"], Result[\"Std Error\"], Result[\"t values\"], Result[\"P-value\"], Result[\"MSE\"], Result[\"R-squared\"] = [params, sd_b, ts_b, p_values, MSE, R_squared]\n print(Result)\n return None\n\nMLR(X, int(sys.argv[2]))\n", "sub_path": "Exercise-6/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.genfromtxt", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "statsmodels.api.add_constant", "line_number": 24, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 24, "usage_type": "name"}, {"api_name": "statsmodels.api.OLS", "line_number": 25, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.stats.t.cdf", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.stats.t", "line_number": 43, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 48, "usage_type": "call"}, 
{"api_name": "numpy.round", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 59, "usage_type": "attribute"}]}
+{"seq_id": "546804568", "text": "# Dieses Skript Tauscht den HTML Code zwischen Orignal Anfrage und Proxy Abfrage aus\n# Skript in Kombination mit Firefox verwenden\n# Wir müssen jedoch noch die Proxy Einstellungen im Browser anpassen: Manuell Proxy Config \"127.0.0.1\", 7654\n# damit unsere Anfrage auf unseren Proxy Server umgeleitet wird\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\nfrom socketserver import ThreadingMixIn # Für Verbesserung der Perfomance\n\nimport requests\nimport random\nimport urllib\n\n# Vererbung:Nimm gesamten Rahmen von BaseHTTPReqestHandler aber tausche ein paar Sachen aus\nclass MyRequestHandler(BaseHTTPRequestHandler):\n\n def do_POST(self):\n print(self.path)\n print(self.headers) # Header wenn der Browser dem Proxy Daten schickt\n if self.headers[\"content-type\"] == \"application/x-www-form-urlencoded\": # Wenne es sich um Typ Formular handelt\n length = int(self.headers[\"content-length\"]) # Länge des Formularinhalts als integer ermitteln\n print(length)\n read_form_raw = str(self.rfile.read(length), \"utf-8\") # Formulardaten lesen (raw)\n data = urllib.parse.parse_qs(read_form_raw) # Raw Formular zerlegen in strukturierte Form (dict) umwandeln\n\n with requests.post(self.path, data=data, stream=True) as res: # Schicke Post Requests an Server mit Formulardaten data ,welche wir gerade eben auzsgelesen haben\n\n self.send_response(res.status_code) # ABSOLUT NOTWENDIGE ZEILE. Statuscode muss immer an Browser mitgeteilt werden. Weiterleiten den Angefragten Pfades vom Broswser\n # Headers 1 zu 1 an Browser weiterleiten\n #print(res.headers) # res.headers ist ein Dictionary\n for key, value in res.headers.items(): # Auflösung Dictionary\n self.send_header(key, value)\n self.end_headers()\n\n # Informationen an unseren Browser schicken. 
Geht nur in Byteform -> Daher wird Str encoded in Bytes\n self.wfile.write(res.raw.read()) # Gibt die Rohdaten die von der Seite gesendet wurden weiter an Browser\n\n def do_GET(self):\n\n if self.path[-4:] == \".jpg\": # Nur wenn folgende Dateiendeung\n\n # Für anderes Bild\n self.send_response(200)\n self.send_header(\"Content-Type\", \"image/jpeg\") # Text\n self.end_headers()\n\n images = [\"./Bilder/1.jpg\", \"./Bilder/2.jpg\"]\n\n with open(random.choice(images), \"rb\") as file:\n self.wfile.write(file.read()) # Text\n\n else:\n # Einrücken Notwendig, damit wir kein Memory Leak haben und damit wir in Variable res zusätzliche eigenschaft haben um auf Rohdaten (stream) zugreifen zu können\n with requests.get(self.path, stream=True) as res: # Herunterladen des angefragten Pfades\n\n self.send_response(res.status_code) # Weiterleiten den Angefragten Pfades vom Broswser\n\n print(res.headers) # res Headers -> Original Server Headers an Proxy die (als Dictionary)\n if \"text/html\" in res.headers[\"content-type\"]: # Wenn es sich um html Datei handelt\n self.send_header(\"Content-Type\", \"text/html\") # Bezieht sich auf die Headers die unser Proxy an den Browser schickt.\n print(res.content) # Enthält Originalinhalt was Server geantwortet hat\n content = str(res.content, \"utf-8\") # Interne Übergabe als String mit utf-8 Format\n content = content.replace(\"Bilder\", \"Katzenbilder\") # Ersetzt in HTML das Wort Bilder durch Katzenbilder\n #self.wfile.write(res.content, encode()) # Senden des Originalinhaltes\n self.wfile.write(content, encode()) # Senden unserer Message\n\n else:\n # Headers 1 zu 1 an Browser weiterleiten\n #print(res.headers) # res.headers ist ein Dictionary\n for key, value in res.headers.items(): # Auflösung Dictionary\n self.send_header(key, value)\n self.end_headers()\n\n # Informationen an unseren Browser schicken. Geht nur in Byteform -> Daher wird Str encoded in Bytes\n self.wfile.write(res.raw.read()) # Gibt die Rohdaten die von der Seite gesendet wurden weiter an Browser\n\n# Optimierung -> Kombination aus ThreadMixIn, HTTPServer (Mehrfachvererbung)\nclass ThreadingHTTPServer(ThreadingMixIn, HTTPServer): #\n pass\n\naddress = (\"127.0.0.1\", 7654) # IP Adresse (entsprechend dem Computer auf dem der Server läuft) und Port -> http://127.0.0.1:7654\n\nserver = ThreadingHTTPServer(address, MyRequestHandler) # ThreadingHTTP Server Adresse zuweisen, und verhalte dich entsprechend MyRequestHandler\nserver.serve_forever() # Server Starten und halte diesen am laufen", "sub_path": "01_Tutorials/Udemy Kurs Ehical Hacking/09_Praxis_MITM_mit_HTTP-Proxy/105_HTTP_Proxy_Server_Fomular_auslesen.py", "file_name": "105_HTTP_Proxy_Server_Fomular_auslesen.py", "file_ext": "py", "file_size_in_byte": 4838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 14, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qs", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 23, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "socketserver.ThreadingMixIn", "line_number": 77, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "463870255", "text": "from django.db import IntegrityError\n\nfrom rest_framework import generics, status, views\nfrom rest_framework.response import Response\n\nfrom videos.models import Video\nfrom videos.permissions import VideoViewPermissions\nfrom videos.serializers import VideoSerializer, CreateVideoSerializer\n\nfrom eswrapper.mixins import ESPaginationMixin\n\n\nclass ListVideos(generics.ListCreateAPIView):\n\n queryset = Video.objects.all()\n serializer_class = VideoSerializer\n permission_classes = (VideoViewPermissions, )\n\n def post(self, request, *args, **kwargs):\n serializer = CreateVideoSerializer(data=request.data)\n if not serializer.is_valid():\n return Response(status=status.HTTP_400_BAD_REQUEST)\n try:\n v = Video.objects.create(**serializer.data)\n return Response(VideoSerializer(v).data, status=status.HTTP_201_CREATED)\n except IntegrityError:\n return Response(status=status.HTTP_409_CONFLICT)\n\n\nclass VideoDetail(generics.RetrieveUpdateDestroyAPIView):\n\n queryset = Video.objects.all()\n serializer_class = VideoSerializer\n permission_classes = (VideoViewPermissions, )\n lookup_url_kwarg = 'video_pk'\n\n\nclass ESVideoList(ESPaginationMixin, views.APIView):\n\n def get(self, request, *args, **kwargs):\n qs = Video.es_objects.all()\n resp = self.esresp(Video.objects.count(), qs)\n return Response(resp, status=status.HTTP_200_OK)\n", "sub_path": "trickapi/api/video_api.py", "file_name": "video_api.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 13, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 15, "usage_type": "name"}, {"api_name": "videos.serializers.VideoSerializer", "line_number": 16, "usage_type": "name"}, {"api_name": "videos.permissions.VideoViewPermissions", "line_number": 17, "usage_type": "name"}, {"api_name": "videos.serializers.CreateVideoSerializer", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.create", "line_number": 24, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 25, "usage_type": "call"}, {"api_name": "videos.serializers.VideoSerializer", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"rest_framework.status", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 30, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.all", "line_number": 32, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 32, "usage_type": "name"}, {"api_name": "videos.serializers.VideoSerializer", "line_number": 33, "usage_type": "name"}, {"api_name": "videos.permissions.VideoViewPermissions", "line_number": 34, "usage_type": "name"}, {"api_name": "eswrapper.mixins.ESPaginationMixin", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.views", "line_number": 38, "usage_type": "name"}, {"api_name": "videos.models.Video.es_objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "videos.models.Video.es_objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 41, "usage_type": "name"}, {"api_name": "videos.models.Video.objects.count", "line_number": 42, "usage_type": "call"}, {"api_name": "videos.models.Video.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "videos.models.Video", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 43, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 43, "usage_type": "name"}]}
+{"seq_id": "237869222", "text": "#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\n/***************************************************************************\n *\n * Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved\n * @file: seg_predict_cpu.py\n * @date 2021/5/8 2:28 PM\n * @brief \n *\n **************************************************************************/\n\"\"\"\nimport paddlehub as hub\nimport cv2\nimport os\nimport shutil\n\npwd = os.getcwd()\nmodels_save = os.path.join(pwd, 'models_save')\npwd_last = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\nimg_data = os.path.join(pwd_last, 'img_data')\nresults = os.path.join(pwd, 'results')\nif os.path.exists(results):\n shutil.rmtree(results)\n\npic_list = ['car.jpeg', 'det_03.jpg', 'small_bike.jpg', 'det_02.jpeg']\n\nfor pic in pic_list:\n model = hub.Module(\n name='ocrnet_hrnetw18_voc',\n pretrained=os.path.join(models_save, 'ocrnet_hrnetw18_voc', 'epoch_2',\n 'model.pdparams'))\n img = cv2.imread(os.path.join(img_data, pic))\n model.predict(images=[img], visualization=True, save_path=results)\n\nassert len(os.listdir(os.path.join(results, 'image'))) == len(pic_list)\nassert len(os.listdir(os.path.join(results, 'mask'))) == len(pic_list)\n", "sub_path": "ce_cloud_models/PaddleHub/SEG/linux/scripts/hub_ocrnet_hrnetw18_voc/seg_predict_cpu.py", "file_name": "seg_predict_cpu.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.getcwd", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 25, "usage_type": "call"}, {"api_name": "paddlehub.Module", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}]}
+{"seq_id": "614184426", "text": "# https://www.acmicpc.net/problem/2178\r\n# 미로 찾기\r\n\r\n'''\r\nBFS 의 특징은 각 정점을 최단경로로 방문한다는 것.\r\n'''\r\nimport sys\r\nfrom collections import deque\r\n\r\ndef bfs(N, M, ones):\r\n dist = [[0] * M for _ in range(N)]\r\n queue = deque()\r\n check = []\r\n\r\n queue.append(ones[0])\r\n\r\n while queue:\r\n node = queue.popleft()\r\n r, c = node\r\n \r\n if r == N - 1 and c == M - 1:\r\n return dist[N - 1][M - 1]\r\n \r\n if (r + 1, c) in ones and (r + 1, c) not in check:\r\n queue.append((r + 1, c))\r\n check.append((r + 1, c))\r\n dist[r + 1][c] = dist[r][c] + 1\r\n\r\n if (r - 1, c) in ones and (r - 1, c) not in check:\r\n queue.append((r - 1, c))\r\n check.append((r - 1, c))\r\n dist[r - 1][c] = dist[r][c] + 1\r\n\r\n if (r, c + 1) in ones and (r, c + 1) not in check:\r\n queue.append((r, c + 1))\r\n check.append((r, c + 1))\r\n dist[r][c + 1] = dist[r][c] + 1\r\n\r\n if (r, c - 1) in ones and (r, c - 1) not in check:\r\n queue.append((r, c - 1))\r\n check.append((r, c - 1))\r\n dist[r][c - 1] = dist[r][c] + 1\r\n\r\n return dist[N - 1][M - 1]\r\n\r\n\r\nN, M = map(int, sys.stdin.readline().split())\r\n\r\nmat = []\r\nones = []\r\nfor i in range(N):\r\n tmp = list(sys.stdin.readline().strip())\r\n line = list(map(int, tmp))\r\n mat.append(line)\r\n for j in range(len(line)):\r\n if line[j] == 1:\r\n ones.append((i, j))\r\n\r\n\r\nprint(bfs(N, M, ones) + 1)", "sub_path": "DFS_BFS/2178.py", "file_name": "2178.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 52, "usage_type": "attribute"}]}
+{"seq_id": "332053206", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/mad/Documents/spike/spike/Algo/Cadzow_mpi2.py\n# Compiled at: 2017-08-31 16:40:33\n# Size of source mod 2**32: 7772 bytes\n\"\"\"\nCreated by Marc-André Delsuc and Lionel Chiron on 2011-07\nCopyright (c) 2010 IGBMC. All rights reserved.\n\nCadzow in MPI mode\ncomplete rewrite from the code from Cyrille Bonamy for the MPI part\n\ncode compatible avec la version 0.4.0 de NPK\n\nThresholding to make Cadzow on the main relevant columns.\n\nnote that the cadzow algo is multithreaded if running over the MKL library.\nSo if MKL is installed, run only on instance per node, as all cores from the node will be solicited.\n\"\"\"\nfrom __future__ import print_function\nimport sys, numpy as np\nimport util.mpiutil as mpiutil\nimport util.progressbar as pg\nimport tables, time, urQRd, Cadzow\nfrom spike.NPKData import NPKData, copyaxes\nfrom spike.FTICR import FTICRData\nimport spike.File.HDF5File as HDF5File\nimport spike.NPKConfigParser as NPKConfigParser\ndebug = False\n\ndef Set_Table_Param():\n tables.parameters.CHUNK_CACHE_PREEMPT = 1\n tables.parameters.CHUNK_CACHE_SIZE = 104857600\n tables.parameters.METADATA_CACHE_SIZE = 104857600\n tables.parameters.NODE_CACHE_SLOTS = 104857600\n\n\ndef selectcol(data, limitpts, nbrows=200):\n \"\"\"\n returns a list of index of the limitpts largest columns of the 2D 'data'\n \n first averaging on nbrows rows\n \n return index list\n \"\"\"\n if debug:\n print('averaging on ', nbrows, ' rows ')\n else:\n roughft2 = data.row(0)\n if roughft2.axis1.itype == 1:\n roughft2.modulus()\n else:\n roughft2.abs()\n for i in range(min(nbrows, data.size1)):\n rr = data.row(i)\n if rr.axis1.itype == 1:\n rr.modulus()\n else:\n rr.abs()\n roughft2.add(rr)\n\n roughft2.mult(1.0 / nbrows)\n n = roughft2.size1 * 0.1\n roughft2.buffer[0:n] = 0.0\n index = find_thres(roughft2, limitpts=limitpts)\n if debug:\n roughft2.display()\n disp = NPKData(buffer=(np.zeros(roughft2.size1)))\n disp.buffer[index] = roughft2.buffer[index]\n disp.display(show=True)\n return index\n\n\ndef find_thres(b, limitpts):\n \"\"\"\n returns a list of index of the limitpts largest points in the 1D data 'b' \n \"\"\"\n thresh = max(b.buffer) + 1.0\n nbpts = 0\n count = 0\n inter = b.buffer.copy()\n while abs(nbpts - limitpts) / float(limitpts) > 0.1:\n if debug:\n print('thresh : ', thresh)\n else:\n nbpts = (inter > thresh).sum()\n inter[inter < thresh] = 0\n if debug:\n print('nbpts', nbpts, 'count ', count)\n count += 1\n if nbpts < limitpts:\n c = inter\n threshold = thresh\n if debug:\n print('threshold ', threshold)\n thresh /= 2.0\n ind = np.where(c > 0)[0]\n else:\n if debug:\n print('treshold min = ', thresh)\n thresh = (threshold + thresh) / 2.0\n if debug:\n print('nouveau threshold ', thresh)\n inter = np.copy(b.buffer)\n if debug:\n print('au dessus thresh ', (inter > thresh).sum())\n if debug:\n print('=0 ', (inter == 0).sum())\n\n return ind\n\n\ndef load_input(name):\n \"\"\"load input file and returns it, in read-only mode\"\"\"\n if debug > 0:\n print('reading', name)\n hf = HDF5File(name, 'r')\n d0 = hf.load()\n return d0\n\n\ndef iterarg(xindex, dinp, n_of_line, n_of_iter, orda):\n \"\"\"an iterator used by the MPI set-up\"\"\"\n for i in xindex:\n c0 = dinp.col(i)\n if debug:\n print(c0.buffer, n_of_line, n_of_iter, orda)\n yield (\n c0.buffer, n_of_line, n_of_iter, orda)\n\n\ndef 
cadz(args):\n \"\"\"utility function\"\"\"\n if debug:\n print(args)\n return (Cadzow.cadzow)(*args)\n\n\ndef rqr(args):\n \"\"\"utility function\"\"\"\n if debug:\n print(args)\n argu = (\n args[0], args[1], args[3])\n return (urQRd.urQRd)(*argu)\n\n\ndef main():\n \"\"\"does the whole job,\n if we are running in MPI, this is only called by job #0\n all other jobs are running mpi.slave()\n \"\"\"\n argv = sys.argv\n if len(argv) != 2:\n print('\\nsyntax is :\\n(mpirun -np N) python program configfile.mscf\\n')\n sys.exit(1)\n else:\n configfile = argv[1]\n cp = NPKConfigParser()\n cp.readfp(open(configfile))\n infile = cp.getword('Cadzow', 'namein')\n print('infile', infile)\n outfile = cp.getword('Cadzow', 'nameout')\n print('outfile', outfile)\n algo = cp.getword('Cadzow', 'algorithm')\n print('algorithm', algo)\n n_of_line = cp.getint('Cadzow', 'n_of_lines', 70)\n print('n_of_line', n_of_line)\n n_of_iter = cp.getint('Cadzow', 'n_of_iters', 1)\n print('n_of_iter', n_of_iter)\n orda = cp.getint('Cadzow', 'order', 500)\n print('order', orda)\n n_of_column = cp.getint('Cadzow', 'n_of_column', 100)\n print('n_of_column', n_of_column)\n progress = cp.getboolean('Cadzow', 'progress', True)\n d0 = load_input(infile)\n d0.check2D()\n Set_Table_Param()\n hfar = HDF5File(outfile, 'w', debug=0)\n d1 = FTICRData(dim=2)\n copyaxes(d0, d1)\n group = 'resol1'\n hfar.create_from_template(d1, group)\n if n_of_column == 0:\n indexes = range(d0.size2)\n else:\n indexes = selectcol(d0, n_of_column)\n if algo == 'Cadzow':\n meth = cadz\n else:\n if algo == 'rQRd':\n meth = rqr\n else:\n raise 'wrong algo'\n t0 = time.time()\n if progress:\n widgets = [\n 'Processing %s: ' % algo, pg.Percentage(), ' ', pg.Bar(marker='-', left='[', right=']'), pg.ETA()]\n pbar = pg.ProgressBar(widgets=widgets, maxval=(len(indexes)))\n d1D = d0.col(0)\n xarg = iterarg(indexes, d0, n_of_line, n_of_iter, orda)\n if mpiutil.MPI_size > 1:\n mpiutil.mprint('MPI Master job - starting slave jobs - ')\n res = mpiutil.enum_imap(meth, xarg)\n for i, p in res:\n d1D.buffer = p\n d1.set_col(indexes[i], d1D)\n if progress:\n pbar.update(i + 1)\n\n else:\n import itertools\n res = itertools.imap(meth, xarg)\n for i, p in enumerate(res):\n d1D.buffer = p\n d1.set_col(indexes[i], d1D)\n if progress:\n pbar.update(i + 1)\n\n print('Processing time : ', time.time() - t0)\n\n\nif __name__ == '__main__':\n if mpiutil.MPI_size < 2:\n print('Running in single processor mode')\n main()\n else:\n print('Running in MPI mode')\n if mpiutil.MPI_rank == 0:\n main()\n else:\n mpiutil.slave()", "sub_path": "pycfiles/spike_py-0.99.15.tar/Cadzow_mpi2.cpython-37.py", "file_name": "Cadzow_mpi2.cpython-37.py", "file_ext": "py", "file_size_in_byte": 7030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tables.parameters", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tables.parameters", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tables.parameters", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tables.parameters", "line_number": 37, "usage_type": "attribute"}, {"api_name": "spike.NPKData.NPKData", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 106, "usage_type": "call"}, {"api_name": "spike.File.HDF5File", "line_number": 119, "usage_type": "call"}, {"api_name": 
"Cadzow.cadzow", "line_number": 138, "usage_type": "call"}, {"api_name": "urQRd.urQRd", "line_number": 147, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 158, "usage_type": "call"}, {"api_name": "spike.NPKConfigParser", "line_number": 161, "usage_type": "call"}, {"api_name": "spike.File.HDF5File", "line_number": 181, "usage_type": "call"}, {"api_name": "spike.FTICR.FTICRData", "line_number": 182, "usage_type": "call"}, {"api_name": "spike.NPKData.copyaxes", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 197, "usage_type": "call"}, {"api_name": "util.progressbar.Percentage", "line_number": 200, "usage_type": "call"}, {"api_name": "util.progressbar", "line_number": 200, "usage_type": "name"}, {"api_name": "util.progressbar.Bar", "line_number": 200, "usage_type": "call"}, {"api_name": "util.progressbar.ETA", "line_number": 200, "usage_type": "call"}, {"api_name": "util.progressbar.ProgressBar", "line_number": 201, "usage_type": "call"}, {"api_name": "util.progressbar", "line_number": 201, "usage_type": "name"}, {"api_name": "util.mpiutil.MPI_size", "line_number": 204, "usage_type": "attribute"}, {"api_name": "util.mpiutil", "line_number": 204, "usage_type": "name"}, {"api_name": "util.mpiutil.mprint", "line_number": 205, "usage_type": "call"}, {"api_name": "util.mpiutil", "line_number": 205, "usage_type": "name"}, {"api_name": "util.mpiutil.enum_imap", "line_number": 206, "usage_type": "call"}, {"api_name": "util.mpiutil", "line_number": 206, "usage_type": "name"}, {"api_name": "itertools.imap", "line_number": 215, "usage_type": "call"}, {"api_name": "time.time", "line_number": 222, "usage_type": "call"}, {"api_name": "util.mpiutil.MPI_size", "line_number": 226, "usage_type": "attribute"}, {"api_name": "util.mpiutil", "line_number": 226, "usage_type": "name"}, {"api_name": "util.mpiutil.MPI_rank", "line_number": 231, "usage_type": "attribute"}, {"api_name": "util.mpiutil", "line_number": 231, "usage_type": "name"}, {"api_name": "util.mpiutil.slave", "line_number": 234, "usage_type": "call"}, {"api_name": "util.mpiutil", "line_number": 234, "usage_type": "name"}]}
+{"seq_id": "254738972", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom monitoring_app.models import CompetitorProduct\nimport random\nfrom decimal import Decimal\n\n\ndef get_html(url):\n user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36'\n r = requests.get(url, headers={'User-Agent': user_agent})\n if r.ok:\n return r.text\n print(r.status_code)\n\n\ndef refined(s):\n s = s.replace('\\t', '').replace('\\n', '').replace('\\r', '')\n return s\n\n\ndef get_page_data(html):\n data_list = []\n soup = BeautifulSoup(html, 'lxml')\n divs = soup.find_all('a', class_=\"sel-product-tile-title\")\n\n for div in divs:\n url = 'https://www.mvideo.ru' + div.get('href')\n products = div.get('data-product-info').split('{')[1::2]\n\n for product in products:\n refined_product = refined(product)\n p = '{' + refined_product\n\n d = eval(p)\n\n id_product = d.get('productId')\n name = d.get('productName')\n price = d.get('productPriceLocal')\n categoryId = d.get('productCategoryId')\n categoryName = d.get('productCategoryName')\n vendorName = d.get('productVendorName')\n groupId = d.get('productGroupId')\n shop = 'М.видео'\n\n data = {'id_product': id_product,\n 'name': name,\n # генерация цены с рандомайзером для создания образца базы данных МОИХ товаров\n # 'price': float(price) + round(random.uniform(-1, 1)*400)*5,\n 'price': price,\n 'categoryId': categoryId,\n 'categoryName': categoryName,\n 'vendorName': vendorName.lower().title(),\n 'groupId': groupId,\n 'url': url,\n 'shop': shop}\n\n print(data)\n data_list.append(data)\n return data_list\n\n\ndef write_db(competitor_products):\n meta = {'updated_count': 0, 'created_count': 0}\n urls = [competitor_product.get('url') for competitor_product in competitor_products if\n competitor_product.get('url')]\n CompetitorProduct.objects.filter(url__in=urls).update(status=False)\n\n for competitor_product in competitor_products:\n url = competitor_product.get('url')\n if url:\n price = Decimal(competitor_product.get('price'))\n id_product = int(competitor_product.get('id_product'))\n categoryId = competitor_product.get('categoryId')\n categoryName = competitor_product.get('categoryName')\n vendorName = competitor_product.get('vendorName')\n groupId = competitor_product.get('groupId')\n shop = competitor_product.get('shop')\n name = competitor_product.get('name')\n\n _, created = CompetitorProduct.objects.update_or_create(url=url, defaults={'id_product': id_product,\n 'name': name,\n 'price': price,\n 'categoryId': categoryId,\n 'categoryName': categoryName,\n 'vendorName': vendorName,\n 'groupId': groupId,\n 'status': True,\n 'shop': shop})\n if created:\n meta['created_count'] += 1\n else:\n meta['updated_count'] += 1\n return meta\n\n\ndef mvideo(url_target, page_count):\n pattern = url_target + '/f/page={}'\n for i in range(1, int(page_count) + 1):\n url = pattern.format(str(i))\n html = get_html(url)\n product_list = get_page_data(html)\n write_db(product_list)\n product_count_on_page = len(product_list)\n print(\"-\" * 42 + \"\\nНа странице номер {} получено {} продуктов\".format(i,\n product_count_on_page) + \"\\n\" + \"-\" * 42)\n meta = write_db(product_list)\n print(f'--> {i}: {meta}')\n all_product_count = int(product_count_on_page) * int(page_count)\n print(\"-\" * 42 + \"\\nВсего на странице {} получено {} продуктов\".format(url_target,\n all_product_count) + \"\\n\" + \"-\" * 42)\n", "sub_path": "app/monitoring_app/parsers/mvideo.py", "file_name": "mvideo.py", 
"file_ext": "py", "file_size_in_byte": 4937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects.filter", "line_number": 66, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "monitoring_app.models.CompetitorProduct", "line_number": 66, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 71, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects.update_or_create", "line_number": 80, "usage_type": "call"}, {"api_name": "monitoring_app.models.CompetitorProduct.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "monitoring_app.models.CompetitorProduct", "line_number": 80, "usage_type": "name"}]}
+{"seq_id": "570154954", "text": "import matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sys import exit\nsns.set_style('white')\nsns.set_context('paper')\n# Plot adjustments:\nplt.rcParams.update({'ytick.labelsize': 28})\nplt.rcParams.update({'xtick.labelsize': 28})\nplt.rcParams.update({'axes.labelsize': 45})\nplt.rcParams.update({'legend.fontsize': 36})\nplt.rcParams.update({'axes.titlesize':50})\nplt.rcParams.update({'axes.grid': False})\n\nregression_data = ['52_log_GFP_0.01_LOO.txt',\n '52_log_sum_ratio_0.019_LOO.txt',\n 'lin_log_mKate_0.019_LOO.txt']\nclass_data = ['2016-06-22__GFP_above_parent_SEStructure_LOO.txt',\n '2016-06-22__sum_ratio_above_parent_SEStructure_LOO.txt',\n '2016-06-22__mKate_above_parent_structure_LOO.txt',\n]\nfile_names = ['GFP',\n 'sum_ratio',\n 'mKate']\nnames = ['localization',\n 'localization efficiency',\n 'expression']\nvalidations = [('log_GFP_52_0.01.txt', 'GFP_above_SEStructure.txt'),\n ('log_sum_ratio_52_0.019.txt', 'sum_ratio_above_SEStructure.txt'),\n ('log_mKate_lin_0.019.txt', 'mKate_above_structure.txt',)]\nys = ['log_GFP',\n 'log_sum_ratio',\n 'log_mKate']\n\nroot = '../../Programming Tools/Twist Project/'\ndata_folder = root + '2016-06-22/models/'\nvalidation_folder = root + '2016-06-22/validation/'\nplot_folder = 'plots/'\nparents = {'cschrimson':sns.xkcd_rgb['pale red'],\n 'c1c2':sns.xkcd_rgb['medium green'],\n 'cheriff':sns.xkcd_rgb['denim blue']}\nparent_names = ['cschrimson', 'c1c2', 'cheriff']\n\nformatter = mtick.FormatStrFormatter('%.0f')\n\nwith open(root + '2016-06-22/props.pkl', 'rb') as f:\n props = pickle.load(f, encoding='latin1')\nwith open(root + '2016-06-22/validation_props.pkl', 'rb') as f:\n v_props = pickle.load(f, encoding='latin1')\n\n# cdfs\nfig = plt.figure()\nfig.set_size_inches(11.5,8)\nax1 = fig.add_subplot(111)\nprops = props.dropna()\nprops = props.sort_values('log_mKate')\nprops['mKate_rank'] = np.linspace(0.0, 1.0, len(props))\nprops = props.sort_values('log_GFP')\nprops['GFP_rank'] = np.linspace(0.0, 1.0, len(props))\nprops = props.sort_values('log_sum_ratio')\nprops['ratio_rank'] = np.linspace(0.0, 1.0, len(props))\nalpha = 0.7\nmKate_handle, = ax1.plot(props['log_mKate'], props['mKate_rank'],\n 'o', label='expression', alpha=alpha)\nGFP_handle, = ax1.plot(props['log_GFP'], props['GFP_rank'],\n 'o', label='localization', alpha=alpha)\nratio_handle, = ax1.plot(props['log_sum_ratio'], props['ratio_rank'],\n 'o', label='localization efficiency', alpha=alpha)\nax1.set_ylabel('cumulative probability')\nleg = ax1.legend(handles=[mKate_handle, GFP_handle, ratio_handle],\n loc='best', handletextpad=0)\nax1.margins(0.02)\nfig.savefig('plots/cdfs.pdf')\n\n# with verification\nkeep_me = ['name', 'log_mKate', 'log_GFP', 'log_sum_ratio']\nv_props = v_props[~v_props['name'].isin(parent_names)]\nall_props = pd.concat([props[keep_me], v_props[keep_me]])\nall_props = all_props.sort_values('log_mKate')\nall_props['mKate_rank'] = np.linspace(0.0, 1.0, len(all_props))\nall_props = all_props.sort_values('log_GFP')\nall_props['GFP_rank'] = np.linspace(0.0, 1.0, len(all_props))\nall_props = all_props.sort_values('log_sum_ratio')\nall_props['ratio_rank'] = np.linspace(0.0, 1.0, len(all_props))\nfig = plt.figure()\nfig.set_size_inches(11.5,8)\nax1 = fig.add_subplot(111)\nalpha = 0.1\nax1.plot(props['log_mKate'], props['mKate_rank'],\n 'o', label='expression', alpha=alpha)\nax1.plot(props['log_GFP'], props['GFP_rank'],\n 'o', 
label='localization', alpha=alpha)\nax1.plot(props['log_sum_ratio'], props['ratio_rank'],\n 'o', label='localization efficiency', alpha=alpha)\nveri = all_props[all_props['name'].isin(v_props['name'])]\nalpha = 1.0\nax1.set_prop_cycle(None)\nmKate_handle, = ax1.plot(veri['log_mKate'], veri['mKate_rank'],\n 'o', label='expression', alpha=alpha)\nGFP_handle, = ax1.plot(veri['log_GFP'], veri['GFP_rank'],\n 'o', label='localization', alpha=alpha)\nratio_handle, = ax1.plot(veri['log_sum_ratio'], veri['ratio_rank'],\n 'o', label='localization efficiency', alpha=alpha)\nax1.set_ylabel('cumulative probability')\nleg = ax1.legend(handles=[mKate_handle, GFP_handle, ratio_handle],\n loc='best', handletextpad=0)\nax1.margins(0.02)\nfig.savefig('plots/verification_cdfs.pdf')\n\nfor reg, clas, file_name, name, validation, y in zip(regression_data,\n class_data, file_names,\n names, validations, ys):\n r_df = pd.read_csv(data_folder + reg, skiprows=1, comment='#')\n c_df = pd.read_csv(data_folder + clas, comment='#')\n r_v = pd.read_csv(validation_folder + validation[0], comment='#')\n c_v = pd.read_csv(validation_folder + validation[1], comment='#')\n\n # plot regression and classification LOOs side by side\n fig = plt.figure()\n fig.set_size_inches((24,9))\n ax1 = fig.add_subplot(121)\n ax1.plot(r_df['y'], r_df['mu'], 'o', ms=12, color='grey', alpha=0.5)\n for p in parent_names:\n ax1.plot(r_df[r_df['name']==p]['y'], r_df[r_df['name']==p]['mu'],\n 'o', ms=14, color=parents[p], alpha=0.8)\n ax1.set_xlabel('measured\\n' + name)\n ax1.set_ylabel('predicted ' + name)\n ax1.set_title('regression')\n xlims = ax1.get_xlim()\n if name != 'expression':\n ylims = ax1.get_ylim()\n ylims = ax1.set_ylim([xlims[0] * 0.75, ylims[1]])\n ax2 = fig.add_subplot(122)\n c_df['real'] = [props[props['name']==n][y] for n in c_df['name']]\n ax2.plot(c_df['real'], c_df['pi'], 'o', ms=12, color='grey', alpha=0.5)\n for p in parent_names:\n ax2.plot(c_df[c_df['name']==p]['real'], c_df[c_df['name']==p]['pi'],\n 'o', ms=14, color=parents[p], alpha=0.8)\n frac = 0.9\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0,\n box.width * frac, box.height])\n box = ax2.get_position()\n ax2.set_position([box.x0 - box.width * (1-frac), box.y0,\n box.width * frac, box.height])\n\n lg = plt.legend(('training set', 'CsChrimR', 'C1C2', 'CheRiff'),\n loc='center left', bbox_to_anchor=(1, 0.5),\n frameon=True, handletextpad=0, borderpad=0.03)\n lowest_parent = min(props[props['name'].isin(parent_names)][y])\n ylims = ax2.set_ylim([0, 1])\n xlims = ax2.set_xlim(xlims)\n ax2.set_title('classification')\n ax2.axvspan(xlims[0], lowest_parent, facecolor='grey', alpha=0.2)\n ax2.axvline(lowest_parent, color=sns.xkcd_rgb['gold'], alpha=0.8)\n ax2.set_xlabel('measured\\n' + name)\n ax2.set_ylabel('predicted prob above parent')\n fig.savefig('plots/' + file_name + '_LOO.pdf',\n bbox_inches='tight')\n # plot combined regression and classification side by side\n ax1.plot(r_v['y'], r_v['mu'], 'o', ms=12, color='black', alpha=0.9)\n ax2.plot(c_v['real'], c_v['pi'], 'o', ms=12, color='black', alpha=0.9)\n handles, labels = ax2.get_legend_handles_labels()\n lg = plt.legend(handles[0:4] + [handles[-1]],\n ('training set', 'CsChrimR', 'C1C2', 'CheRiff', 'verify'),\n loc = 'center left', bbox_to_anchor=(1, 0.5),\n frameon=True, handletextpad=0, borderpad=0.03)\n fig.savefig('plots/' + file_name + '_combined.pdf', bbox_inches='tight')\n", "sub_path": "2/plotting.py", "file_name": "plotting.py", "file_ext": "py", "file_size_in_byte": 7560, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "seaborn.set_style", "line_number": 8, "usage_type": "call"}, {"api_name": "seaborn.set_context", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 42, "usage_type": "attribute"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 43, "usage_type": "attribute"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 44, "usage_type": "attribute"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 47, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 50, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 122, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 158, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}]}
+{"seq_id": "94564782", "text": "from Countries import France\nfrom mimesis import Person\nfrom mimesis.enums import Gender\n\nperson = Person(France['gen'])\nperson.full_name(gender=Gender.MALE)\n\n\"\"\"\ncs da de de-at de-ch el en en-au en-ca \nCzech Danish German Austrian german Swiss german Greek English Australian English Canadian English\n\nen-gb es es-mx et fa fi fr hu is it \nBritish English Spanish Mexican Spanish Estonian Farsi Finnish French Hungarian Icelandic Italian \n\nja kk ko nl nl-be no pl pt pt-br \nJapanese Kazakh Korean Dutch Belgium Dutch Norwegian Polish Portuguese Brazilian Portuguese \n\nru sv tr uk zh \nRussian Swedish Turkish Ukrainian Chinese \n\n\"\"\"\n\n# Romania\n\nRomanian_surnames = {'Popa', 'Popescu', 'Ionescu', 'Pop', 'Radu', 'Dumitru', 'Gheorghe', 'Stoica', 'Stan', 'Munteanu',\n 'Constantin', 'Andrei', 'Rusu', 'Anghel', 'Matei', 'Marin', 'Mihai', 'Ciobanu', 'Serban', 'Stefan',\n 'Lazar', 'Florea', 'Dumitrescu', 'Barbu', 'Stanciu', 'Vasile', 'Ilie', 'Cristea', 'Toma',\n 'Moldovan', 'Oprea', 'Dinu', 'Tudor', 'Ionita', 'Ion', 'Ungureanu', 'Constantinescu', 'Georgescu',\n 'Balan', 'Neagu', 'Dragomir', 'Badea', 'Cojocaru', 'Sandu', 'Mocanu', 'Enache', 'Nagy', 'Coman',\n 'Craciun', 'Lupu', 'Muresan', 'Vlad', 'Dobre', 'Tanase', 'Avram', 'Radulescu', 'Iordache',\n 'Grigore', 'Lungu', 'Ivan', 'Nicolae', 'Szabo', 'Bucur', 'Manea', 'Ene', 'Marinescu', 'Alexandru',\n 'Petre', 'Albu', 'Voicu', 'Preda', 'Iancu', 'Dragan', 'Olteanu', 'Stoian', 'David', 'Petrescu',\n 'Roman', 'Iacob', 'Filip', 'Diaconu', 'Costea', 'Baciu', 'Marcu', 'Rosu', 'Nistor', 'Kovacs',\n 'Pavel', 'Cretu', 'Stanescu', 'Anton', 'Simion', 'Luca', 'Nita', 'Calin', 'Rotaru', 'Nedelcu',\n 'Bogdan', 'Suciu', 'Crisan'}\n", "sub_path": "1930_name_gen.py", "file_name": "1930_name_gen.py", "file_ext": "py", "file_size_in_byte": 2360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mimesis.Person", "line_number": 5, "usage_type": "call"}, {"api_name": "Countries.France", "line_number": 5, "usage_type": "name"}, {"api_name": "mimesis.enums.Gender.MALE", "line_number": 6, "usage_type": "attribute"}, {"api_name": "mimesis.enums.Gender", "line_number": 6, "usage_type": "name"}]}
+{"seq_id": "482545842", "text": "\"\"\"utility functions to build CLI.\"\"\"\n\nfrom __future__ import print_function\nimport six\nimport sys\nimport ctypes\n\nSYNUTIL_INSTANCE = None\n\n\ndef _get_synutil():\n global SYNUTIL_INSTANCE\n if SYNUTIL_INSTANCE is None:\n i = ctypes.cdll.LoadLibrary(\"libsynutil.so\")\n i.synutil_echo_ok.restype = None\n i.synutil_echo_ok.argtypes = [ctypes.c_char_p]\n i.synutil_echo_nok.restype = None\n i.synutil_echo_nok.argtypes = [ctypes.c_char_p]\n i.synutil_echo_warning.restype = None\n i.synutil_echo_warning.argtypes = [ctypes.c_char_p]\n i.synutil_echo_bold.restype = None\n i.synutil_echo_bold.argtypes = [ctypes.c_char_p]\n i.synutil_echo_running.restype = None\n i.synutil_echo_running.argtypes = []\n i.synutil_echo_clean.restype = None\n i.synutil_echo_clean.argtypes = []\n SYNUTIL_INSTANCE = i\n return SYNUTIL_INSTANCE\n\n\ndef echo_ok(message=\"\"):\n \"\"\"Write [OK] with colors if supported a little optional message.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n _get_synutil().synutil_echo_ok(message.encode('utf8'))\n\n\ndef echo_nok(message=\"\"):\n \"\"\"Write [ERROR] with colors if supported a little optional message.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n _get_synutil().synutil_echo_nok(message.encode('utf8'))\n\n\ndef echo_warning(message=\"\"):\n \"\"\"Write [WARNING] with colors if supported a little optional message.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n _get_synutil().synutil_echo_warning(message.encode('utf8'))\n\n\ndef echo_bold(message):\n \"\"\"Write a message in bold (if supported).\n\n Args:\n message (string): message to write in bold.\n\n \"\"\"\n _get_synutil().synutil_echo_bold(message.encode('utf8'))\n\n\ndef echo_running(message=None):\n \"\"\"Write [RUNNING] with colors if supported.\n\n You can pass an optional message which will be rendered before [RUNNING]\n on the same line.\n\n Args:\n message (string): little optional message.\n\n \"\"\"\n if message is None:\n _get_synutil().synutil_echo_running()\n else:\n if six.PY2:\n print(message, end=\"\")\n sys.stdout.flush()\n else:\n print(message, end=\"\", flush=True)\n _get_synutil().synutil_echo_running()\n\n\ndef echo_clean():\n \"\"\"Clean waiting status.\"\"\"\n _get_synutil().synutil_echo_clean()\n", "sub_path": "layers/layer1_python3/0100_mfutil/mfutil/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 2435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ctypes.cdll.LoadLibrary", "line_number": 14, "usage_type": "call"}, {"api_name": "ctypes.cdll", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 16, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 20, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 22, "usage_type": "attribute"}, {"api_name": "six.PY2", "line_number": 84, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 86, "usage_type": "attribute"}]}
+{"seq_id": "587857399", "text": "#!/usr/bin/python\n\nimport sys\nimport json\n\n#set the path to the input file\ntweets_data_path = 'C:/Users/Tanvi/Desktop/project2/stream/tweets_MH.txt'\n#tweets_data_path = 'C:/Users/Tanvi/Desktop/project2/stream/pollution/tweets_P.txt'\n\n#initialize an array and open the output file for reading\ntweets_file = open(tweets_data_path, \"r\")\n\n\n#process each line in input file\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n num_urls = len(tweet['entities']['urls'])\n #print(\"num_urls: \", num_urls)\n if num_urls > 0:\n for i in range(num_urls):\n url = tweet['entities']['urls'][i][\"expanded_url\"]\n if url:\n print (\"{}\\t{}\".format(url.lower(), 1))\n else:\n url = tweet['entities']['urls'][i][\"url\"]\n if url:\n print (\"{}\\t{}\".format(url.lower(), 1)) \n \n except:\n continue\n \n\n", "sub_path": "Code/mapper_topUrls.py", "file_name": "mapper_topUrls.py", "file_ext": "py", "file_size_in_byte": 975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.loads", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "461753459", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PCNet(nn.Module):\n\n def __init__(self, K: int, M: int, R_epochs: int = 150, R_lr: float = 0.1, lmda: float = 5e-3):\n '''\n Create a sparse coding network. Neural responses are fitted through ISTA algorithm.\n\n Args:\n K: number of neurons\n M: size of receptive field (width / height)\n R_epochs: number of epochs to run for ISTA\n R_lr: learning rate for ISTA\n lmda: regularization strength for ISTA\n '''\n super(PCNet, self).__init__()\n self.K = K\n self.M = M\n self.R_epochs = R_epochs\n self.R_lr = R_lr\n self.lmda = lmda\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # model weigths\n self.U = torch.randn(self.K, self.M ** 2, requires_grad=True, device=self.device)\n with torch.no_grad():\n self.U = F.normalize(self.U, dim=1)\n self.U.requires_grad_(True)\n # responses\n self.R = None\n\n def _ista(self, img_batch):\n # create R\n batch_size = img_batch.shape[0]\n self.R = torch.zeros((batch_size, self.K), requires_grad=True, device=self.device)\n # trian\n for _ in range(self.R_epochs):\n # pred\n pred = self.R @ self.U\n # loss\n loss = ((img_batch - pred) ** 2).sum()\n loss.backward()\n # update R in place\n self.R.data.sub_(self.R_lr * self.R.grad.data)\n # zero grad\n self.zero_grad()\n # soft thresholding\n with torch.no_grad():\n self.R = PCNet._soft_thresholding(self.R, self.lmda)\n self.R.requires_grad_(True)\n\n @staticmethod\n def _soft_thresholding(x, alpha):\n return F.relu(x - alpha) - F.relu(-x - alpha)\n\n def zero_grad(self):\n self.U.grad.data.zero_()\n self.R.grad.data.zero_()\n\n def forward(self, img_batch):\n # first fit\n self._ista(img_batch)\n # now predict again\n pred = self.R @ self.U\n return pred\n", "sub_path": "src/model/PCNet.py", "file_name": "PCNet.py", "file_ext": "py", "file_size_in_byte": 2149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 56, "usage_type": "name"}]}
+{"seq_id": "575177164", "text": "import torch\n\nchars_lower = [ chr(code) for code in range(ord('a'),ord('z')+1)]\nchars_upper = [ chr(code) for code in range(ord('A'),ord('Z')+1)]\nchars_special = [ code for code in \" -_.\" ]\ncode_special = [ \"?\", \"\", \"\", \"PAD\" ]\n\nSYMBOLS = chars_lower + chars_upper + chars_special + code_special\n\nMAX_LEN=27\nFIRST_LAYER_SIZE=MAX_LEN * len(SYMBOLS)\n\nMAX_OUT_LEN=22\nLAST_LAYER_SIZE=MAX_OUT_LEN * len(SYMBOLS)\n\nSEP_TOKEN = '[SEP]'\nCLS_TOKEN = '[CLS]'\nTRAIN_FILE_PATH = './data/labeled-2.csv'\nMODEL_FILE_PATH = '/share/model/predicate-model.pth'\nMODEL_OVERWRITE = False\nBATCH_SIZE = 2\nNUM_EPOCHS = 100\nGRADIENT_ACCUMULATION_STEPS = 8\nMAX_CLASS_SIZE = 20 # float(\"inf\") for all\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(DEVICE)\n", "sub_path": "abbrev-trainer/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}]}
+{"seq_id": "360354813", "text": "\"\"\"Integer field class & utilities.\"\"\"\nfrom gettext import gettext as _\nfrom typing import Any\nfrom typing import Optional\nfrom typing import cast\n\nfrom pofy.core.constants import UNDEFINED\nfrom pofy.core.errors import ErrorCode\nfrom pofy.core.interfaces import ILoadingContext\nfrom pofy.core.validation import ValidateCallback\nfrom pofy.fields.base_field import ScalarField\n\n\nclass IntField(ScalarField):\n \"\"\"Integer YAML object field.\"\"\"\n\n def __init__(\n self,\n base: int = 0,\n minimum: Optional[int] = None,\n maximum: Optional[int] = None,\n required: bool = False,\n validate: Optional[ValidateCallback] = None,\n ):\n \"\"\"Initialize int field.\n\n Args:\n base: Base in which this field is encoded. By default, base is 0,\n meaning that python will distinguish automatically decimal,\n octal, and hexadecimal notations from the string.\n minimum: Minimum value for the field. If the value is out of bound,\n a VALIDATION_ERROR will be raised.\n maximum: Maximum value for the field. If the value is out of bound,\n a VALIDATION_ERROR will be raised.\n required: See BaseField constructor.\n validate: See BaseField constructor.\n\n \"\"\"\n super().__init__(required=required, validate=validate)\n self._base = base\n self._minimum = minimum\n self._maximum = maximum\n\n def _convert(self, context: ILoadingContext) -> Any:\n node = context.current_node()\n value = node.value\n result: Optional[int] = None\n\n try:\n result = int(value, self._base)\n except ValueError:\n context.error(\n ErrorCode.VALUE_ERROR,\n _('Can\\'t convert \"{}\" to an integer'), value\n )\n return UNDEFINED\n\n return cast(Optional[int], ScalarField._check_in_bounds(\n context,\n result,\n self._minimum,\n self._maximum\n ))\n", "sub_path": "pofy/fields/int_field.py", "file_name": "int_field.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pofy.fields.base_field.ScalarField", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "pofy.core.validation.ValidateCallback", "line_number": 23, "usage_type": "name"}, {"api_name": "pofy.core.interfaces.ILoadingContext", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "pofy.core.errors.ErrorCode.VALUE_ERROR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pofy.core.errors.ErrorCode", "line_number": 53, "usage_type": "name"}, {"api_name": "gettext.gettext", "line_number": 54, "usage_type": "call"}, {"api_name": "pofy.core.constants.UNDEFINED", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 58, "usage_type": "name"}, {"api_name": "pofy.fields.base_field.ScalarField._check_in_bounds", "line_number": 58, "usage_type": "call"}, {"api_name": "pofy.fields.base_field.ScalarField", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 44, "usage_type": "name"}]}
+{"seq_id": "93054590", "text": "# coding=utf-8\nfrom django.core.mail import EmailMultiAlternatives\nimport datetime\n\"\"\"\nNot finish yet\n\"\"\"\n\n\nclass Mail(object):\n def __init__(self):\n pass\n\n def send(self, operations, email):\n \"\"\"\n\n :param operations: [{'type': op_type, 'state': state, 'l_url':l_url, 'l_name': l_name,\n 'cate_eng': cate_eng, 'cate_chn': cate_chn}, ...]\n op_type: add/update\n state: 成功/失败\n :param email:\n :return:\n \"\"\"\n subject = '%s Update Report' % datetime.datetime.now().strftime('%Y-%m-%d %H:%M')\n text_content = 'content here'\n\n # content\n movie_list = []\n tv_list = []\n anime_list = []\n show_list = []\n for op in operations:\n if op.get('cate_eng') == 'movie':\n movie_list.append(op)\n elif op.get('cate_eng') == 'tv':\n tv_list.append(op)\n elif op.get('cate_eng') == 'anime':\n anime_list.append(op)\n elif op.get('cate_eng') == 'show':\n show_list.append(op)\n content = ''\n for item in (movie_list, tv_list, anime_list, show_list):\n for op in item:\n op.get('')\n content = ''.encode(\n 'utf8')\n html_content = open(\n BASE_DIR + '/templates/userinfo/mail/general_mail.html').read() \\\n .replace('subject_default', subject).replace('content_default',\n content).replace(\n 'link_default', '')\n\n from_email = '比格电影 '\n # from_email = 'bigedianying@gmail.com'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "sub_path": "spider/mail.py", "file_name": "mail.py", "file_ext": "py", "file_size_in_byte": 1857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 54, "usage_type": "call"}]}
+{"seq_id": "141808738", "text": "from typing import List\nimport numpy as np\n\n\nclass BaskinModel(object):\n '''\n Basking model implementation\n Asumes 0.4 factor for endurance limit\n Suitable for carbon steels\n Not applicable for alloys\n '''\n\n # Fatigue chart resolution\n NO_OF_CHART_POINTS = 200\n\n def __init__(\n self,\n faitgue_stress: List[float],\n ult_strength: float,\n modification_factor: float,\n ):\n self.fatigue_stress = faitgue_stress\n self.ult_strength = ult_strength\n self.modification_factor = modification_factor\n\n def get_baskin_params(self, derated=True):\n '''\n Calculates Baskin parameters for the fatigue curve\n '''\n # If data should be raw, not derated, applied modification factor = 1\n if derated == False:\n self.modification_factor = 1\n self.endurance_limit = 0.4 * self.ult_strength * self.modification_factor\n\n def s_1000_factor():\n '''\n Calculates starting point for fatigue curve\n Based on Shigley data, depends on material ultimate strength\n '''\n if self.ult_strength < 130:\n return (\n -1.4218548015713e-07 * self.ult_strength ** 3\n + 0.0000563482426806003 * self.ult_strength ** 2\n - 0.00832826468826188 * self.ult_strength\n + 1.25431693640081\n )\n return (\n -3.30944038409e-09 * self.ult_strength ** 3\n + 3.31244407581022e-06 * self.ult_strength ** 2\n - 0.00134990048235594 * self.ult_strength\n + 0.936702621709383\n )\n\n self.B_factor = (\n -1\n / 3\n * np.log10(\n s_1000_factor()\n * self.modification_factor\n * self.ult_strength\n / self.endurance_limit\n )\n )\n self.C_factor = np.log10(\n (self.modification_factor * s_1000_factor() * self.ult_strength) ** 2\n / self.endurance_limit\n )\n\n def get_allowable_cycles(self):\n '''\n Calculates allowable cycles based on modification factor and fatigue stress\n @return: List[float]\n '''\n self.get_baskin_params()\n allowable_cycles = []\n for stress in self.fatigue_stress:\n if stress <= self.endurance_limit:\n allowable_cycles.append(10 ** 12)\n else:\n allowable_cycles.append(\n 10 ** (-self.C_factor / self.B_factor)\n * stress ** (1 / self.B_factor)\n )\n return allowable_cycles\n\n def get_damage(self, required_cycles: List[float]):\n '''\n Calculates fatigue damage based on raquired and allwable cycle values\n @return: List[float]\n '''\n damage = []\n allowable_cycles = self.get_allowable_cycles()\n for req_cycles, allow_cycle in zip(required_cycles, allowable_cycles):\n damage.append(round(req_cycles / allow_cycle, 3))\n return damage\n\n def get_chart_data(self, derated):\n '''\n Evaluate fatigue chart using Baskin model\n User can evaluated raw (derated=False) or derated curve\n @return: tuple[List[float]]\n '''\n self.get_baskin_params(derated)\n print(f\"Modification factor {self.modification_factor}\")\n cycle_range = np.linspace(\n 1000, 1_000_000, num=self.NO_OF_CHART_POINTS, endpoint=True\n )\n stress = [10 ** self.C_factor * item ** self.B_factor for item in cycle_range]\n return cycle_range, stress\n", "sub_path": "api/src/fatigue/fatiguelife.py", "file_name": "fatiguelife.py", "file_ext": "py", "file_size_in_byte": 3690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 64, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 105, "usage_type": 
"call"}]}
+{"seq_id": "617959963", "text": "from django.contrib.auth import views as auth_views\nfrom django.urls import include, path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.PostList.as_view(), name='index'),\n path('post/create/', views.PostCreate.as_view(), name='create'),\n path('post//details/', views.PostDetails.as_view(), name='details'),\n path('post//edit/', views.PostEdit.as_view(), name='edit'),\n path('post/drafts/', views.PostDrafts.as_view(), name='drafts'),\n path('post//publish/', views.PostPublish.as_view(), name='publish'),\n path('post//remove/', views.PostRemove.as_view(), name='remove'),\n path('accounts/', include('django.contrib.auth.urls')),\n # Function View\n # path('details//', views.post_detail, name='details'),\n]\n", "sub_path": "app/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "46585154", "text": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nimport ARM\n\nclass TargetListBox(wx.ListBox):\n def __init__(self,parent,winId,boxSize,dp,rl):\n wx.ListBox.__init__(self,parent,winId,size=boxSize)\n self.dbProxy = dp\n self.theDimMenu = wx.Menu()\n self.theDimMenu.Append(armid.DIMLIST_MENUADD_ID,'Add')\n self.theDimMenu.Append(armid.DIMLIST_MENUDELETE_ID,'Delete')\n self.theRiskList = rl\n self.theSelectedValue = ''\n self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown)\n wx.EVT_MENU(self.theDimMenu,armid.DIMLIST_MENUADD_ID,self.onAddDimension)\n wx.EVT_MENU(self.theDimMenu,armid.DIMLIST_MENUDELETE_ID,self.onDeleteDimension)\n\n def OnRightDown(self,evt):\n self.PopupMenu(self.theDimMenu)\n\n def onAddDimension(self,evt):\n targetList = self.dbProxy.targetNames(self.theRiskList.GetItems())\n from DimensionNameDialog import DimensionNameDialog\n dlg = DimensionNameDialog(self,'Target',targetList,'Add')\n if (dlg.ShowModal() == armid.DIMNAME_BUTTONACTION_ID):\n additionalDimension = dlg.dimensionName()\n self.Append(additionalDimension)\n self.theSelectedValue = additionalDimension\n\n def onDeleteDimension(self,evt):\n idx = self.GetSelection()\n if (idx == -1):\n errorText = 'No ' + self.theDimensionTable + ' selected'\n errorLabel = 'Delete ' + self.theDimensionTable\n dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n else:\n self.theSelectedValue = self.GetSelection()\n self.Delete(self.theSelectedValue)\n", "sub_path": "cairis/cairis/TargetListBox.py", "file_name": "TargetListBox.py", "file_ext": "py", "file_size_in_byte": 2311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "wx.ListBox", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wx.ListBox.__init__", "line_number": 25, "usage_type": "call"}, {"api_name": "wx.ListBox", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 27, "usage_type": "call"}, {"api_name": "armid.DIMLIST_MENUADD_ID", "line_number": 28, "usage_type": "attribute"}, {"api_name": "armid.DIMLIST_MENUDELETE_ID", "line_number": 29, "usage_type": "attribute"}, {"api_name": "wx.EVT_RIGHT_DOWN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 33, "usage_type": "call"}, {"api_name": "armid.DIMLIST_MENUADD_ID", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wx.EVT_MENU", "line_number": 34, "usage_type": "call"}, {"api_name": "armid.DIMLIST_MENUDELETE_ID", "line_number": 34, "usage_type": "attribute"}, {"api_name": "DimensionNameDialog.DimensionNameDialog", "line_number": 42, "usage_type": "call"}, {"api_name": "armid.DIMNAME_BUTTONACTION_ID", "line_number": 43, 
"usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 53, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 53, "usage_type": "attribute"}]}
+{"seq_id": "591283925", "text": "# Python Moduals\nimport Queue \nimport platform\nimport time\nimport pandas as pd\n\n# Person python files\nimport data\nimport strategy\nimport portfolio\nimport execution\nimport visualize\n\n\n\nif platform.system() == \"Linux\":\n dirName = \"/home/nkippers/git/quantStartTrail1\"\nelse:\n dirName = \"/Users/noelkippers/git/quantStartTrail1\"\n\nevents = Queue.Queue()\nsymbol = [\"AAPL\"]\nstart_date = \"2014-01-10\"\n\nevents = Queue.Queue()\nbars = data.HistoricCSVDataHandler(events, dirName, symbol)\nstrategy = strategy.BuyAndHoldStrategy(bars, events)\nport = portfolio.NaivePortfolio(bars, events, start_date, initial_capital=100000.0)\nbroker = execution.SimulatedExecutionHandler(events)\nplotter = visualize.DataPlots(port)\n\n# Declare the components with respective parameters\n# bars = DataHandler(..)\n# strategy = Strategy(..)\n# port = Portfolio(..)\n# broker = ExecutionHandler(..)\n\nwhile True:\n # Update the bars (specific backtest code, as opposed to live trading)\n if bars.continue_backtest == True:\n bars.update_bars()\n else:\n break\n \n # Handle the events\n while True:\n try:\n event = events.get(False)\n except Queue.Empty:\n break\n else:\n if event is not None:\n# print event.type\n if event.type == 'MARKET':\n strategy.calculate_signals(event)\n port.update_timeindex(event)\n\n elif event.type == 'SIGNAL':\n port.update_signal(event)\n\n elif event.type == 'ORDER':\n broker.execute_order(event)\n\n elif event.type == 'FILL':\n port.update_fill(event)\n \n# time.sleep(1)\n # 10-Minute heartbeat\n# time.sleep(10*60)\nport.create_equity_curve_dataframe()\n# print port.output_summary_stats()\n# port.plot_summary()\n# plotter.plot_OHLC()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "platform.system", "line_number": 16, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 21, "usage_type": "call"}, {"api_name": "Queue.Queue", "line_number": 25, "usage_type": "call"}, {"api_name": "data.HistoricCSVDataHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "strategy.BuyAndHoldStrategy", "line_number": 27, "usage_type": "call"}, {"api_name": "portfolio.NaivePortfolio", "line_number": 28, "usage_type": "call"}, {"api_name": "execution.SimulatedExecutionHandler", "line_number": 29, "usage_type": "call"}, {"api_name": "visualize.DataPlots", "line_number": 30, "usage_type": "call"}, {"api_name": "Queue.Empty", "line_number": 49, "usage_type": "attribute"}, {"api_name": "strategy.calculate_signals", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "637916524", "text": "from pathlib import Path\nfrom os.path import getsize\nfrom shutil import copy\n\n\ndef print_d(mypath: Path) -> [Path]:\n\t'In the first menu between D and R, function for D'\n\ttemp = []\n\tfor x in mypath.iterdir():\n\t\tif x.is_file():\n\t\t\ttemp.append(x)\n\tb = sorted(temp)\n\treturn b\n\ndef print_r(mypath: Path) -> [Path]:\n\t'In the first menu between D and R, function for R'\n\ta.extend(print_d(mypath))\n\tfor x in sorted(mypath.iterdir()):\n\t\tif x.is_dir():\n\t\t\tprint_r(x)\n\treturn a\n\ndef print_a(b: [Path]) -> [Path]:\n\t'It is printing function exist mainly because many menu requires printing result'\n\tfor x in b:\n\t\tprint(x)\n\treturn b\n\ndef print_n(mystring: str) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for N'\n\tmylist = []\n\tfor x in a:\n\t\tif x.name == mystring:\n\t\t\tmylist.append(x)\n\treturn mylist\n\ndef print_e(mystring: str) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for E'\n\tmylist = []\n\tif mystring[0] == '.':\n\t\tmystring2 = mystring[1:]\n\telse:\n\t\tmystring2 = mystring\n\n\tfor x in a:\n\t\tif x.suffix[1:] == mystring2:\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef textcheck(mystring: str, filepath: Path) -> bool:\n\t'In the second menu among N, E, T, <, >, it is used for T.'\n\t'I left the function of checking text separate from making list when push T'\n\tthe_file = open(filepath, 'r')\n\twhile True:\n\t\tline = the_file.readline()\n\t\tif line.endswith('\\n'):\n\t\t\tline = line[:-1]\n\t\t\tif mystring in line:\n\t\t\t\tthe_file.close()\n\t\t\t\treturn True\n\t\telif line == '':\n\t\t\tthe_file.close()\n\t\t\treturn False\n\t\telse:\n\t\t\tthe_file.close()\n\t\t\treturn False\n\ndef print_t(mystring: str) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for T.'\n\tmylist = []\n\tfor x in a:\n\t\tif textcheck(mystring, x):\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef print_gt(myint: int) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for >.'\n\tmylist = []\n\tfor x in a:\n\t\tif getsize(x) > myint:\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef print_lt(myint: int) -> [Path]:\n\t'In the second menu among N, E, T, <, >, it is for <.'\n\tmylist = []\n\tfor x in a:\n\t\tif getsize(x) < myint:\n\t\t\tmylist.append(x)\n\t\t\tprint(x)\n\treturn mylist\n\ndef f_check(mylist: [Path]) -> None:\n\t'In the third menu among F D T, it is for F.'\n\tfor x in mylist:\n\t\ttry:\n\t\t\tthe_file = open(x, 'r')\n\t\t\tline = the_file.readline()\n\t\t\tline = line[:-1]\n\t\t\tprint(line)\n\t\texcept:\n\t\t\tprint('NOT TEXT')\n\tthe_file.close()\n\ndef d_check(mylist: [Path]) -> None:\n\t'In the third menu among F D T, it is for D.'\n\tfor x in mylist:\n\t\ty = str(x) + \".dup\"\n\t\tcopy(x, y)\n\ndef t_check(mylist: [Path]) -> None:\n\t'In the third menu among F D T, it is for T.'\n\tfor x in mylist:\n\t\tx.touch()\n\ndef main_menu() -> list:\n\t'It is the first menu'\n\tmyloop = True\n\twhile myloop:\n\t\tmyinput = input('')\n\t\tmypath = Path(myinput[2:])\n\t\tif ((myinput.startswith('D') or myinput.startswith('R')) \n\t\tand(len(myinput)>2) and (myinput[1] == ' ') and (mypath.exists())):\n\t\t\tmyloop = False\n\t\t\tif myinput[0] == 'D':\n\t\t\t\td = print_d(mypath)\n\t\t\t\tprint_a(d)\n\t\t\t\treturn d\n\t\t\telif myinput[0] == 'R':\n\t\t\t\tr = print_r(mypath)\n\t\t\t\tprint_a(r)\n\t\t\t\treturn r\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\t\ndef second_menu(b: [Path]) -> [Path]:\n\t'It is the second 
menu'\n\tmyloop = True\n\twhile myloop:\n\t\tmyinput = input('')\n\t\tmystring = myinput[2:]\n\t\tif myinput == 'A':\n\t\t\tmyloop = False\n\t\t\tprint_a(b)\n\t\telif ((myinput.startswith('N') or myinput.startswith('E') or \n\t\tmyinput.startswith('T') or myinput.startswith('<') or \n\t\tmyinput.startswith('>')) and (len(myinput)>2) and (myinput[1] == ' ')):\n\t\t\tmyloop = False\n\t\t\tif myinput[0] == 'N':\n\t\t\t\tn = print_n(mystring)\n\t\t\t\tprint_a(n)\n\t\t\t\treturn n\n\t\t\telif myinput[0] == 'E':\n\t\t\t\te = print_e(mystring)\n\t\t\t\tprint_a(e)\n\t\t\t\treturn e\n\t\t\telif myinput[0] == 'T':\n\t\t\t\tt = print_t(mystring)\n\t\t\t\tprint_a(t)\n\t\t\t\treturn t\n\t\t\telif myinput[0] == '<':\n\t\t\t\tlt = print_lt(int(mystring))\n\t\t\t\tprint_a(lt)\n\t\t\t\treturn lt\n\t\t\telif myinput[0] == '>':\n\t\t\t\tgt = print_gt(int(mystring))\n\t\t\t\tprint_a(gt)\n\t\t\t\treturn gt\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\ndef third_menu(b: Path) -> None:\n\t'It is the third menu'\n\tmyloop = True\n\twhile myloop:\n\t\tmyinput = input('')\n\t\tif ((myinput == 'F') or (myinput == 'D') or (myinput == 'T')):\n\t\t\tmyloop = False\n\t\t\tif myinput == 'F':\n\t\t\t\tf_check(b)\n\t\t\telif myinput == 'D':\n\t\t\t\td_check(b)\n\t\t\telif myinput == 'T':\n\t\t\t\tt_check(b)\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\nif __name__ == '__main__':\n\ta = []\n\tmyfirstmenu = main_menu()\n\tmysecondmenu = second_menu(myfirstmenu)\n\n\tthird_menu(mysecondmenu)\n\n", "sub_path": "project1/project1_09.py", "file_name": "project1_09.py", "file_ext": "py", "file_size_in_byte": 4419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pathlib.Path", "line_number": 6, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 23, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 82, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 91, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 96, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 108, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 112, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 124, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 139, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 175, "usage_type": "name"}]}
+{"seq_id": "351357956", "text": "#-*- coding: utf-8 -*-\nimport pymysql\nimport urllib.request\nimport json\nimport time\nimport sys\nimport re\nfrom bs4 import BeautifulSoup\nfrom enum import Enum\nimport hashlib\nfrom datetime import timedelta, timezone, datetime\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom queue import Queue\nfrom collections import OrderedDict\nimport schedule # pip install schedule, https://github.com/dbader/schedule\nfrom time import gmtime, strftime\nimport pytz\n\nfrom dbConfig import *\n\n\ncoin_name_list = ['BTC', 'ETH', 'DASH', 'LTC', 'ETC', 'XRP', 'BCH', 'XMR', 'ZEC'] # 9개 (QTUM 제외)\n# coin_name_list = ['ETH', 'DASH', 'LTC', 'ETC', 'XRP', 'BCH', 'XMR', 'ZEC'] # 8개\n\nlength_process = 100\n\ninsert_trade_sql = \"INSERT INTO `TRADE_{0:s}` (`date`, `exchange_rate`, `price`, `price2`, `amount`, `total`, `type`, `exchange`, `count`, `trade_id`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\nselect_trade_sql = \"SELECT count(*) FROM `TRADE_{0:s}` WHERE `trade_id`=%s\"\nselect_in_sql = \"SELECT `trade_id` FROM `TRADE_{0:s}` WHERE %s\"\nselect_trade_all_sql = \" UNION \".join(list(map(lambda x: \"(SELECT '\" + x + \"', `price2`, `date` FROM trade.TRADE_\" + x + \" order by id desc limit 1)\", coin_name_list)))\nselect_in_sql = \"SELECT `trade_id` FROM `TRADE_{0:s}` WHERE %s\"\nselect_f_exchange_sql = \"SELECT * FROM `F_EXCHANGE` WHERE `timestamp`=%s and `quote`=%s;\"\ninsert_f_exchange_sql = \"INSERT INTO `F_EXCHANGE` (`timestamp`, `quote`) VALUES (%s, %s);\"\n\nclass Terms_Bithumb(Enum):\n sell = \"bid\"\n buy = \"ask\"\n\nclass Terms_Poloniex(Enum):\n sell = \"sell\"\n buy = \"buy\"\n\ncumulative_bithumb_call_count = 0\ncumulative_poloniex_call_count = 0\n\nprevious_t_bithumb = {}\nprevious_t_poloniex = {}\nexist_bithumb_table_records = {}\n\nfor coin_name in coin_name_list:\n previous_t_bithumb[coin_name] = [{\"transaction_date\":\"1511031406\",\"type\":\"ask\",\"units_traded\":\"0.042\",\"price\":\"9047000\",\"total\":\"379974\"}]\n previous_t_poloniex[coin_name] = [\n {\"globalTradeID\": 266012461, \"tradeID\": 12459517, \"date\": \"2017-11-22 01:34:12\", \"type\": \"sell\",\n \"rate\": \"8101.67292206\", \"amount\": \"0.05619519\", \"total\": \"455.27504917\"}]\n exist_bithumb_table_records[coin_name] = OrderedDict()\n\nrecent_foreign_exchange_rate = (\"1092.90\", \"2017-11-22 11:16:00\")\n\nclass ConnectionPool():\n \"\"\"\n Usage:\n conn_pool = ConnectionPool(max_pool_size = 5)\n conn = conn_pool.get_connection()\n conn_pool.return_connection(db)\n conn_pool.close()\n \"\"\"\n def __init__(self, max_pool_size=5):\n self.max_pool_size = max_pool_size\n self.initialize_pool()\n\n def initialize_pool(self):\n self.pool = Queue(maxsize=self.max_pool_size)\n for _ in range(0, self.max_pool_size):\n self.pool.put_nowait(\n pymysql.connect(host=dbURL,\n port=dbPort,\n user=dbUser,\n passwd=dbPass,\n db=dbName,\n charset='utf8mb4',\n use_unicode=True\n )\n )\n\n def get_connection(self):\n # returns a conn instance when one is available else waits until one is\n conn = self.pool.get(True)\n\n # checks if conn is still connected because conn instance automatically closes when not in used\n if not self.ping(conn):\n conn.connect()\n\n return conn\n\n def return_connection(self, conn):\n return self.pool.put_nowait(conn)\n\n def close(self):\n while not self.is_empty():\n self.pool.get().close()\n\n def ping(self, conn):\n data = conn.query('SELECT 1', [])\n return data\n\n def get_initialized_connection_pool(self):\n return self.pool\n\n def 
is_empty(self):\n return self.pool.empty()\n\nconn_pool = ConnectionPool(max_pool_size = 5)\n\ndef by_trade_timestamp(trade):\n return trade['transaction_date']\n\ndef by_trade_date(trade):\n return trade['date']\n\ndef utc_to_asia_seoul(utc_dt):\n local_tz = pytz.timezone('Asia/Seoul')\n local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\n return local_tz.normalize(local_dt)\n\ndef get_trade_bithumb(coin_name, conn):\n global previous_t_bithumb\n global exist_bithumb_table_records\n\n try:\n url_s = 'https://api.bithumb.com/public/recent_transactions/' + coin_name + '?count=' + str(length_process)\n raw_read = urllib.request.urlopen(url_s).read()\n t_bithumb = json.loads(raw_read)['data']\n except BaseException as e:\n print(\"Bithumb API Exception!!! - \", e)\n return 0\n\n for trade in t_bithumb:\n timestamp = int(time.mktime(datetime.strptime(trade['transaction_date'], \"%Y-%m-%d %H:%M:%S\").timetuple()))\n trade['transaction_date'] = timestamp\n\n t_bithumb.sort(key=by_trade_timestamp, reverse=True)\n\n last_t_trade = t_bithumb[-1]\n\n found_same = False\n for p_idx, p_trade in enumerate(previous_t_bithumb[coin_name]):\n if p_trade['transaction_date'] == last_t_trade['transaction_date'] and \\\n p_trade['units_traded'] == last_t_trade['units_traded'] and \\\n p_trade['price'] == last_t_trade['price'] and \\\n p_trade['type'] == last_t_trade['type'] and \\\n p_trade['total'] == last_t_trade['total']:\n found_same = True\n break\n if found_same:\n new_trade_list = t_bithumb[:len(t_bithumb) - p_idx - 1]\n else:\n new_trade_list = t_bithumb\n\n previous_t_bithumb[coin_name] = sorted(t_bithumb, key=by_trade_timestamp, reverse=True)\n\n if len(new_trade_list) > 0:\n new_trade_list.reverse()\n trade_id_count = {}\n for trade in new_trade_list:\n date = trade['transaction_date']\n amount = trade['units_traded']\n price = trade['price']\n type = Terms_Bithumb(trade['type']).name\n total = trade['total']\n trade_id = hashlib.sha224((str(date) + amount + price + type + total).encode('utf-8')).hexdigest()\n\n if trade_id in exist_bithumb_table_records[coin_name].keys():\n trade_id_count[trade_id] = exist_bithumb_table_records[coin_name][trade_id] + 1\n else:\n if trade_id in trade_id_count.keys():\n trade_id_count[trade_id] += 1\n else:\n trade_id_count[trade_id] = 1\n try:\n date = datetime.fromtimestamp(trade['transaction_date']).strftime('%Y-%m-%d %H:%M:%S')\n exist_bithumb_table_records[coin_name][trade_id] = trade_id_count[trade_id]\n cursor = conn.cursor()\n cursor.execute(\n insert_trade_sql.format(coin_name),\n (date, str(1.0), price, price, amount, total, type, 'bithumb', trade_id_count[trade_id], trade_id)\n )\n except Exception as e:\n print(\"Bithumb Insert Exception\", e)\n pass\n conn.commit()\n\n over_num_queue = len(exist_bithumb_table_records[coin_name]) - length_process * 2\n if over_num_queue > 0:\n for _ in range(over_num_queue):\n exist_bithumb_table_records[coin_name].popitem(last=False)\n\n return len(new_trade_list)\n\ndef get_trade_poloniex(coin_name, conn):\n global previous_t_poloniex\n\n try:\n url_s = 'https://poloniex.com/public?command=returnTradeHistory¤cyPair=USDT_' + coin_name + '&limit=' + str(length_process)\n raw_read = urllib.request.urlopen(url_s).read()\n t_poloniex = json.loads(raw_read)\n except BaseException as e:\n print(\"Poloniex API Exception!!! 
- \", e)\n return 0\n\n\n for trade in t_poloniex:\n timestamp = int(time.mktime(datetime.strptime(trade['date'], \"%Y-%m-%d %H:%M:%S\").timetuple()))\n trade['date'] = timestamp\n\n t_poloniex.sort(key=by_trade_date, reverse=True)\n\n last_t_trade = t_poloniex[-1]\n\n found_same = False\n for p_idx, p_trade in enumerate(previous_t_poloniex[coin_name]):\n if p_trade['globalTradeID'] == last_t_trade['globalTradeID']:\n found_same = True\n break\n if found_same:\n new_trade_list = t_poloniex[:len(t_poloniex) - p_idx - 1]\n else:\n new_trade_list = t_poloniex\n\n previous_t_poloniex[coin_name] = sorted(t_poloniex, key=by_trade_date, reverse=True)\n\n if len(new_trade_list) > 0:\n new_trade_list.reverse()\n trade_id_count = {}\n for trade in new_trade_list:\n date = datetime.fromtimestamp(trade['date']) + timedelta(hours=9)\n date = datetime.fromtimestamp(date.timestamp()).strftime('%Y-%m-%d %H:%M:%S')\n exchange_rate = float(recent_foreign_exchange_rate[0])\n price = trade['rate']\n price2 = float(trade['rate']) * exchange_rate\n amount = trade['amount']\n type = Terms_Poloniex(trade['type']).name\n total = float(trade['amount']) * price2\n trade_id = str(trade['globalTradeID'])\n\n try:\n\n cursor = conn.cursor()\n cursor.execute(\n insert_trade_sql.format(coin_name),\n (date, str(exchange_rate), price, str(price2), amount, str(total), type, 'poloniex', 1, trade_id)\n )\n except Exception as e:\n print(\"Poloniex Insert Exception\", e)\n pass\n conn.commit()\n\n return len(new_trade_list)\n\ndef get_foreign_exchange():\n def getPage(url):\n \"\"\"\n url 정보의 내용을 조회한다.\n \"\"\"\n try:\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n content = res.read()\n except:\n content = \"\"\n\n return content\n\n def getExchangeOfNation(soup):\n dicExchange = {}\n\n alpha = '([A-Z]+)'\n\n for item in soup.table('tr')[2:]:\n # 정보 파싱\n nation = item('td')[0].text.strip()\n re_result = re.search(alpha, nation)\n nation = re_result.groups()[0]\n\n basicRateOfExchange = item('td')[1].text # 매매기준환율\n cash_buy = item('td')[2].text # 현찰 살때\n cash_sell = item('td')[3].text # 현찰 팔때\n transfer_send = item('td')[4].text # 송금 보낼 때\n transfer_receive = item('td')[5].text # 송금 받을 때\n\n dicExchange[nation] = {'basicRate': basicRateOfExchange, 'cashBuy': cash_buy, \\\n 'cashSell': cash_sell, 'transferSend': transfer_send,\n 'transferReceive': transfer_receive}\n\n return dicExchange\n\n # naver 환율 페이지 조회\n url = \"http://info.finance.naver.com/marketindex/exchangeList.nhn\"\n\n # page 내용을 조회한다.\n try:\n res = getPage(url)\n\n soup = BeautifulSoup(res, 'html.parser')\n nationExchangeRate = getExchangeOfNation(soup)\n except BaseException as e:\n print(\"get_foreign_exchange - Exception!!! - \", e)\n return\n\n # 최신 정보로 변경\n global recent_foreign_exchange_rate\n new_rate = nationExchangeRate['USD']['basicRate'].replace(',','')\n now = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n print(\"Foreign Exchange Rate Changed - \", new_rate, now)\n recent_foreign_exchange_rate = (new_rate, now)\n\ndef gmail_send():\n conn = conn_pool.get_connection()\n cursor = conn.cursor()\n cursor.execute(select_trade_all_sql)\n rows = cursor.fetchall()\n\n msg_str = \"\"\n btc_price = None\n for row in rows:\n coin_name = str(row[0])\n if coin_name == 'BTC':\n btc_price = str(row[1])\n msg_str += coin_name + \" - Price: \" + str(row[1]) + \" - Date: \" + str(row[2]) + \"
\"\n\n msg_exchange = \"Basic Exchange Rate: \" + recent_foreign_exchange_rate[0] + \" - Date:\" + recent_foreign_exchange_rate[1]\n\n msg_content = '[Trading Information Collection Status]
{msg_str}
{msg_exchange}'.format(\n msg_str=msg_str,\n msg_exchange=msg_exchange\n )\n message = MIMEText(msg_content, 'html')\n\n message['From'] = 'ManuscriptLink '\n message['To'] = 'Youn-Hee Han '\n message['Subject'] = 'Trading Information Collection Status (BTC: {btc_price})'.format(btc_price=btc_price)\n\n msg_full = message.as_string()\n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(email_account, email_password)\n server.sendmail(email_account, ['support@thinkonweb.com'], msg_full)\n server.quit()\n print(\"Gmail Sent! -\", strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n conn_pool.return_connection(conn)\n\nif __name__ == \"__main__\":\n schedule.every().day.at(\"10:30\").do(gmail_send)\n #schedule.every().minute.do(gmail_send)\n schedule.every().minute.do(get_foreign_exchange)\n # schedule.every().day.at(\"10:30\").do(job)\n # schedule.every(5).to(10).minutes.do(job)\n # schedule.every().monday.do(job)\n # schedule.every().wednesday.at(\"13:15\").do(job)\n\n start_time = datetime.now(timezone.utc)\n\n try:\n while True:\n schedule.run_pending()\n conn = conn_pool.get_connection()\n for coin_name in coin_name_list:\n insert_count_bithumb = get_trade_bithumb(coin_name, conn)\n insert_count_poloniex = get_trade_poloniex(coin_name, conn)\n\n cumulative_bithumb_call_count += 1\n cumulative_poloniex_call_count += 1\n\n print(\"{0:6s}: New Bithumb Trade:{1:3d}, New Poloniex Trade:{2:3d} - {3:s}\".format(\n \"[\" + coin_name + \"]\",\n insert_count_bithumb,\n insert_count_poloniex,\n str(utc_to_asia_seoul(datetime.now(timezone.utc)))\n ))\n sys.stdout.flush()\n\n elapsed_time = (datetime.now(timezone.utc) - start_time).seconds\n print(\" Bithumb API Call Rate: {:5.2f} calls/sec. (It should be less than 20 calls/sec.)\".format(cumulative_bithumb_call_count / elapsed_time))\n print(\"Poloniex API Call Rate: {:5.2f} calls/sec. 
(It should be less than 6 calls/sec.)\".format(cumulative_poloniex_call_count / elapsed_time))\n print()\n\n conn_pool.return_connection(conn)\n except BaseException as e:\n print(e)\n finally:\n print(\"Finally!!!!\")\n conn_pool.close()", "sub_path": "0.Common/3.CoinTrading/trade_info_collector.py", "file_name": "trade_info_collector.py", "file_ext": "py", "file_size_in_byte": 14610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "enum.Enum", "line_number": 36, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 40, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 56, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 73, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 76, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 122, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 123, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlopen", "line_number": 132, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 132, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 132, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 133, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "name"}, {"api_name": "hashlib.sha224", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 181, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 205, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 205, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 205, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 206, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 213, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 236, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 236, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 236, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 237, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 266, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 266, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 266, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 267, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 267, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 267, "usage_type": "name"}, {"api_name": "re.search", "line_number": 282, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 304, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 313, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 313, 
"usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 337, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 345, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 350, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 350, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 354, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 356, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 362, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 362, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 362, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 362, "usage_type": "name"}, {"api_name": "schedule.run_pending", "line_number": 366, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 379, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 379, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 379, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 379, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 381, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 381, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 383, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 383, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 383, "usage_type": "name"}]}
+{"seq_id": "55576176", "text": "import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nPATH = 'D:/bookPic/'\r\nfilename = 'rada2.jpg'\r\n\r\n#img = cv2.imread('wrongPath/rada2.jpg')\r\nimg = cv2.imread(PATH+filename)\r\nprint(type(img))\r\nif not isinstance(img, np.ndarray):\r\n print('Unsuccessfully load the image \"{}\"'.format(filename))\r\n exit()\r\nRGB_img = cv2.cvtColor(img, cv2.COLOR_B)\r\n#downsize the image to half of the original\r\nnew_width, new_height = int(RGB_img.shape[1]/2), int(RGB_img.shape[0]/2)\r\nRGB_resize = cv2.resize(RGB_img.copy(), (new_width, new_height))\r\n#save the images\r\ncv2.imwrite('C:/test/rada2_resize.jpg',RGB_resize)\r\ncv2.imwrite('C:/test/rada2.jpg', img)\r\n\r\nax1 = plt.subplot(1, 3, 1)\r\nplt.imshow(img)\r\nax1.set_title('img')\r\nax2 = plt.subplot(1, 3, 2, yticklabels = [])\r\nplt.imshow(RGB_img)\r\nax2.set_title('RGB_img')\r\nax3 = plt.subplot(1, 3, 3, yticklabels = [])\r\nplt.imshow(RGB_resize)\r\nax3.set_title('RGB_resize')\r\nplt.show()", "sub_path": "ex 3.3.py", "file_name": "ex 3.3.py", "file_ext": "py", "file_size_in_byte": 930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_B", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]}
+{"seq_id": "601031311", "text": "from dataLoaders import DigitData\nfrom catalyst.dl import SupervisedRunner, CallbackOrder, Callback, CheckpointCallback\nfrom config import *\nfrom funcs import get_dict_from_class\nfrom models import FeatureExtractor,FCLayered\nfrom losses import BCELoss\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom catalyst import dl\nfrom callbacks import MetricsCallback\nfrom sklearn.model_selection import StratifiedKFold\nimport torch\ndef train(Model1,DataLoad1):\n randSeed=23\n data_load = DigitData(**get_dict_from_class(DataLoad1))\n criterion = BCELoss()\n model = FeatureExtractor(**get_dict_from_class(Model1))\n # model = FCLayered(**get_dict_from_class(Model1))\n if False:\n checkpoint = torch.load(str(saveDirectory) + '/featureExtr_4_100.pth')\n model.load_state_dict(checkpoint)\n model.eval()\n optimizer = optim.Adam(model.parameters(), lr=lr)\n\n skf = StratifiedKFold(n_splits=15, shuffle=True, random_state=randSeed)\n train = data_load.data\n train[\"fold\"] = -1\n\n # train.set_index('index',inplace=True)\n for fold_id, (train_index, val_index) in enumerate(skf.split(train, train[\"fold\"])):\n train.iloc[val_index, -1] = fold_id\n\n # # check the proportion\n fold_proportion = pd.pivot_table(train, columns=\"fold\", values=\"label\", aggfunc=len)\n\n use_fold = 0\n\n train_file = train.query(\"fold != @use_fold\")\n val_file = train.query(\"fold == @use_fold\")\n\n print(\"[fold {}] train: {}, val: {}\".format(use_fold, len(train_file), len(val_file)))\n\n loaders = {\n \"train\": DataLoader(DigitData(data_frame=train_file, **get_dict_from_class(DataLoad1)),\n batch_size=512,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n drop_last=False),\n \"valid\": DataLoader(DigitData(data_frame=val_file, **get_dict_from_class(DataLoad1)),\n batch_size=512,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n drop_last=False)\n }\n\n callbacks = [\n dl.AccuracyCallback(input_key=\"logits\", target_key=\"targets\", num_classes=10, topk_args=[1]),\n\n MetricsCallback(input_key=\"targets\", output_key=\"logits\",\n directory=saveDirectory, model_name='featureExtr_4'),\n # CheckpointCallback(save_n_best=0)\n ]\n runner = SupervisedRunner(\n\n output_key=\"logits\",\n input_key=\"image_pixels\",\n target_key=\"targets\")\n # scheduler=scheduler,\n\n runner.train(\n model=model,\n criterion=criterion,\n loaders=loaders,\n optimizer=optimizer,\n\n num_epochs=epoch,\n verbose=True,\n logdir=f\"fold0\",\n callbacks=callbacks,\n )\n\n # main_metric = \"epoch_f1\",\n # minimize_metric = False\n c = 0\nif __name__ == \"__main__\":\n train(Model1,DataLoad1)", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dataLoaders.DigitData", "line_number": 16, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 16, "usage_type": "call"}, {"api_name": "losses.BCELoss", "line_number": 17, "usage_type": "call"}, {"api_name": "models.FeatureExtractor", "line_number": 18, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 24, "usage_type": "name"}, {"api_name": "sklearn.model_selection.StratifiedKFold", 
"line_number": 26, "usage_type": "call"}, {"api_name": "pandas.pivot_table", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "dataLoaders.DigitData", "line_number": 45, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "dataLoaders.DigitData", "line_number": 51, "usage_type": "call"}, {"api_name": "funcs.get_dict_from_class", "line_number": 51, "usage_type": "call"}, {"api_name": "catalyst.dl.AccuracyCallback", "line_number": 60, "usage_type": "call"}, {"api_name": "catalyst.dl", "line_number": 60, "usage_type": "name"}, {"api_name": "callbacks.MetricsCallback", "line_number": 62, "usage_type": "call"}, {"api_name": "catalyst.dl.SupervisedRunner", "line_number": 66, "usage_type": "call"}]}
+{"seq_id": "496942654", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jla', '0003_auto_20160822_1013'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='colaboradores',\n name='colnivel',\n field=models.IntegerField(null=True, db_column='ColNivel', blank=True),\n ),\n migrations.AlterModelTable(\n name='recibos',\n table='jla_recibos',\n ),\n ]\n", "sub_path": "jla/migrations/0004_auto_20160823_0638.py", "file_name": "0004_auto_20160823_0638.py", "file_ext": "py", "file_size_in_byte": 548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelTable", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}]}
+{"seq_id": "284991198", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/evogrid/caching/ram.py\n# Compiled at: 2006-08-10 15:57:20\n\"\"\"RAM-based cache implementation\n\nThis RAM cache is inspired on zope.app.cache.ram but a bit simpler cause we\ndon't want to inherit from ``Persistent`` and has a slightly different\ninterface as well.\n\nThe original implementation of RAMCache is copyright Zope Corporation and\ncontributors and is distributed under the terms of the Zope Public License.\n\"\"\"\nfrom cPickle import dumps\nfrom evogrid.caching.interfaces import ICache\nfrom threading import Lock\nfrom zope.interface import implements\n_marker = object()\n\nclass RAMCache(object):\n \"\"\"Cache implementation that stores entries in a python dict\"\"\"\n __module__ = __name__\n implements(ICache)\n hits = 0\n misses = 0\n max_entries = None\n\n def __init__(self, max_entries=None):\n self.max_entries = max_entries\n self._store = {}\n self._sorted_keys = []\n self._lock = Lock()\n\n def __len__(self):\n return len(self._store)\n\n def invalidate(self, key=None):\n if key is None:\n self._lock.acquire()\n try:\n self._store.clear()\n del self._sorted_keys[:]\n finally:\n self._lock.release()\n else:\n key = self._buildKey(key)\n if key not in self._store:\n return\n self._lock.acquire()\n try:\n if key in self._store:\n del self._store[key]\n self._sorted_keys.remove(key)\n finally:\n self._lock.release()\n return\n\n def query(self, key, default=None):\n \"\"\"Search the store to find a matching entry\n\n If nothing is found return default. If a matching entry is found,\n the _sorted_keys list order is updated. 
The misses and hits counters\n are updated.\n \"\"\"\n key = self._buildKey(key)\n _store, _sorted_keys = self._store, self._sorted_keys\n result = _store.get(key, _marker)\n if result is _marker:\n self.misses += 1\n return default\n self._lock.acquire()\n try:\n if key in _store:\n _sorted_keys.remove(key)\n _sorted_keys.insert(0, key)\n finally:\n self._lock.release()\n self.hits += 1\n return result\n\n def set(self, key, data):\n \"\"\"Add data to the store\n\n Check that the store size does not exceed ``max_entries``.\n \"\"\"\n key = self._buildKey(key)\n _store, _sorted_keys = self._store, self._sorted_keys\n if key in _store and _store[key] == data:\n return\n self._lock.acquire()\n try:\n if key not in _store:\n len_self = len(self)\n max_entries = self.max_entries\n if max_entries is not None and len_self >= max_entries:\n for i in xrange(len_self - max_entries + 1):\n del _store[_sorted_keys.pop()]\n\n _store[key] = data\n _sorted_keys.insert(0, key)\n finally:\n self._lock.release()\n return\n\n def _buildKey(kw):\n \"\"\"Build a tuple which can be used as an index for a cached value\"\"\"\n k = tuple(sorted(kw.iteritems()))\n try:\n return hash(k)\n except TypeError:\n return dumps(k)\n\n _buildKey = staticmethod(_buildKey)", "sub_path": "pycfiles/evogrid-0.1.0-py2.4/ram.py", "file_name": "ram.py", "file_ext": "py", "file_size_in_byte": 3622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "zope.interface.implements", "line_number": 25, "usage_type": "call"}, {"api_name": "evogrid.caching.interfaces.ICache", "line_number": 25, "usage_type": "argument"}, {"api_name": "threading.Lock", "line_number": 34, "usage_type": "call"}, {"api_name": "cPickle.dumps", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "105357986", "text": "import boto3\nimport os\n\nec2 = boto3.resource('ec2')\ninstance_name = os.environ['instance_name']\n\ndef lambda_handler(event, context):\n print ('Enters into the function')\n instances = ec2.instances.filter(\n Filters=[{'Name': 'tag:Name', 'Values': [instance_name]}]).stop()", "sub_path": "stop_instance_by_tagname.py", "file_name": "stop_instance_by_tagname.py", "file_ext": "py", "file_size_in_byte": 279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "boto3.resource", "line_number": 4, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}]}
+{"seq_id": "7599653", "text": "import pickle\r\nfrom io import StringIO\r\n\r\nfrom flask import Flask, request, make_response, Response, send_file\r\n\r\nfrom flasgger import Swagger\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics import accuracy_score\r\nimport pickle\r\n\r\nfrom sklearn.cluster import KMeans\r\n\r\n\r\napp = Flask(__name__)\r\nSwagger(app)\r\n\r\npickle_in = open(\"classifier.pkl\", \"rb\")\r\nclassifier = pickle.load(pickle_in)\r\npicky_in = open(\"cluster.pkl\", \"rb\")\r\nclusts = pickle.load(picky_in)\r\n\r\n\r\n@app.route('/')\r\ndef welcome():\r\n return \"Welcome All\"\r\n\r\n\r\n@app.route('/predict_file', methods=[\"POST\"])\r\ndef predict_note_file1():\r\n \"\"\"Let's Authenticate the Banks Note\r\n This is using docstrings for specifications.\r\n ---\r\n parameters:\r\n - name: file\r\n in: formData\r\n type: file\r\n required: true\r\n\r\n responses:\r\n 200:\r\n description: The output values\r\n\r\n \"\"\"\r\n df_test = pd.read_csv(request.files.get(\"file\"))\r\n print(df_test.head())\r\n prediction = classifier.predict(df_test)\r\n\r\n return str(list(prediction))\r\n\r\n\r\n@app.route('/predict_similar', methods=[\"POST\"])\r\ndef predict_note_file():\r\n \"\"\"Let's Cluster the Test cases for similarity Level\r\n This is using docstrings for specifications.\r\n ---\r\n parameters:\r\n - name: file\r\n in: formData\r\n type: file\r\n required: true\r\n\r\n responses:\r\n 200:\r\n description: The output values\r\n\r\n \"\"\"\r\n df_test = pd.read_csv(request.files.get(\"file\"), encoding='unicode_escape')\r\n df_test = df_test.dropna(axis=0, how='any')\r\n df_test['combine6'] = df_test.iloc[:, 1] + df_test.iloc[:, 2] + df_test.iloc[:, 3]\r\n vec = TfidfVectorizer(stop_words=\"english\", ngram_range=(1, 3))\r\n vec.fit(df_test.combine6.values)\r\n features = vec.transform(df_test.combine6.values)\r\n\r\n clustr = KMeans(init='k-means++', n_clusters=5, n_init=10)\r\n clustr.fit(features)\r\n df_test['cluster_labels'] = clustr.labels_\r\n output = StringIO()\r\n df_test.to_csv(output)\r\n return Response(output.getvalue(), mimetype=\"text/csv\")\r\n # return \"Check the file is generated\"\r\n # --resp = make_response(df_test.to_csv())\r\n # resp.headers[\"Content-Disposition\"] = (\"attachment; filename=%s\" % filename)\r\n # resp.headers[\"Content-Disposition\"] = \"attachment; filename=export.csv\"\r\n # --resp.headers[\"Content-Type\"] = \"text/csv\"\r\n # resp.headers[\"Content-Disposition\"] = (\"attachment; filename=%s\" % filename)\r\n # --return resp\r\n # & buffer = StringIO()\r\n # & df_test.to_csv(buffer, encoding='utf-8')\r\n # & buffer.seek(0)\r\n # & return send_file(buffer, attachment_filename=\"test.csv\", mimetype='text/csv')\r\n\r\n\r\n# return make_response(df_test.to_csv(), mimetype=\"text/csv\")\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2846, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "flasgger.Swagger", "line_number": 16, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.files.get", "line_number": 45, "usage_type": 
"call"}, {"api_name": "flask.request.files", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.files.get", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 75, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "135489843", "text": "# ------------------------------------------------------------------------------------------------ #\n# MIT License #\n# #\n# Copyright (c) 2020, Microsoft Corporation #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #\n# and associated documentation files (the \"Software\"), to deal in the Software without #\n# restriction, including without limitation the rights to use, copy, modify, merge, publish, #\n# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in all copies or #\n# substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #\n# ------------------------------------------------------------------------------------------------ #\n\nfrom functools import partial\nfrom collections import namedtuple\n\nimport jax\nimport jax.numpy as jnp\nimport haiku as hk\nfrom gym.spaces import Discrete, Box\n\nfrom .._base.test_case import TestCase\nfrom ..utils import safe_sample\nfrom .stochastic_v import StochasticV\n\ndiscrete = Discrete(7)\nboxspace = Box(low=0, high=1, shape=(3, 5))\nnum_bins = 20\n\nEnv = namedtuple('Env', ('observation_space', 'action_space'))\n\n\ndef func(S, is_training):\n batch_norm = hk.BatchNorm(False, False, 0.99)\n logits = hk.Sequential((\n hk.Flatten(),\n hk.Linear(8), jax.nn.relu,\n partial(hk.dropout, hk.next_rng_key(), 0.25 if is_training else 0.),\n partial(batch_norm, is_training=is_training),\n hk.Linear(8), jnp.tanh,\n hk.Linear(num_bins),\n ))\n return {'logits': logits(S)}\n\n\nclass TestStochasticV(TestCase):\n def test_init(self):\n StochasticV(func, Env(boxspace, boxspace), (-10, 10), num_bins=num_bins)\n StochasticV(func, Env(boxspace, discrete), (-10, 10), num_bins=num_bins)\n StochasticV(func, Env(discrete, boxspace), (-10, 10), num_bins=num_bins)\n StochasticV(func, Env(discrete, discrete), (-10, 10), num_bins=num_bins)\n\n # test_call_* ##################################################################################\n\n def test_call_discrete(self):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_, logp = v(s, return_logp=True)\n print(v_, logp, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n self.assertArraySubdtypeFloat(logp)\n self.assertArrayShape(logp, ())\n\n def test_call_boxspace(self):\n env = Env(boxspace, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_, logp = v(s, return_logp=True)\n print(v_, logp, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n self.assertArraySubdtypeFloat(logp)\n self.assertArrayShape(logp, ())\n\n # test_mode_* 
##################################################################################\n\n def test_mode_discrete(self):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_ = v.mode(s)\n print(v_, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n\n def test_mode_boxspace(self):\n env = Env(boxspace, discrete)\n value_range = (-10, 10)\n\n s = safe_sample(env.observation_space, seed=17)\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n v_ = v.mode(s)\n print(v_, env.observation_space)\n self.assertIn(v_, Box(*value_range, shape=()))\n\n def test_function_state(self):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n\n v = StochasticV(func, env, value_range, num_bins=num_bins, random_seed=19)\n\n print(v.function_state)\n batch_norm_avg = v.function_state['batch_norm/~/mean_ema']['average']\n self.assertArrayShape(batch_norm_avg, (1, 8))\n self.assertArrayNotEqual(batch_norm_avg, jnp.zeros_like(batch_norm_avg))\n\n # other tests ##################################################################################\n\n def test_bad_input_signature(self):\n def badfunc(S, is_training, x):\n pass\n msg = (\n r\"func has bad signature; \"\n r\"expected: func\\(S, is_training\\), \"\n r\"got: func\\(S, is_training, x\\)\"\n )\n with self.assertRaisesRegex(TypeError, msg):\n env = Env(boxspace, discrete)\n value_range = (-10, 10)\n StochasticV(badfunc, env, value_range, num_bins=num_bins, random_seed=13)\n\n def test_bad_output_structure(self):\n def badfunc(S, is_training):\n dist_params = func(S, is_training)\n dist_params['foo'] = jnp.zeros(1)\n return dist_params\n msg = (\n r\"func has bad return tree_structure, \"\n r\"expected: PyTreeDef\\({'logits': \\*}\\), \"\n r\"got: PyTreeDef\\({'foo': \\*, 'logits': \\*}\\)\"\n )\n with self.assertRaisesRegex(TypeError, msg):\n env = Env(discrete, discrete)\n value_range = (-10, 10)\n StochasticV(badfunc, env, value_range, num_bins=num_bins, random_seed=13)\n", "sub_path": "coax/_core/stochastic_v_test.py", "file_name": "stochastic_v_test.py", "file_ext": "py", "file_size_in_byte": 6723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gym.spaces.Discrete", "line_number": 34, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 38, "usage_type": "call"}, {"api_name": "haiku.BatchNorm", "line_number": 42, "usage_type": "call"}, {"api_name": "haiku.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "haiku.Flatten", "line_number": 44, "usage_type": "call"}, {"api_name": "haiku.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "jax.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 46, "usage_type": "call"}, {"api_name": "haiku.dropout", "line_number": 46, "usage_type": "attribute"}, {"api_name": "haiku.next_rng_key", "line_number": 46, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 47, "usage_type": "call"}, {"api_name": "haiku.Linear", "line_number": 48, "usage_type": "call"}, {"api_name": "jax.numpy.tanh", "line_number": 48, "usage_type": "attribute"}, {"api_name": "jax.numpy", "line_number": 48, "usage_type": "name"}, {"api_name": "haiku.Linear", "line_number": 49, "usage_type": 
"call"}, {"api_name": "_base.test_case.TestCase", "line_number": 54, "usage_type": "name"}, {"api_name": "stochastic_v.StochasticV", "line_number": 56, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 57, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 58, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 67, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 68, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 80, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 81, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 95, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 96, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.safe_sample", "line_number": 106, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 107, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 111, "usage_type": "call"}, {"api_name": "stochastic_v.StochasticV", "line_number": 117, "usage_type": "call"}, {"api_name": "jax.numpy.zeros_like", "line_number": 122, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 122, "usage_type": "name"}, {"api_name": "stochastic_v.StochasticV", "line_number": 137, "usage_type": "call"}, {"api_name": "jax.numpy.zeros", "line_number": 142, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 142, "usage_type": "name"}, {"api_name": "stochastic_v.StochasticV", "line_number": 152, "usage_type": "call"}]}
+{"seq_id": "430165915", "text": "import SamTech\nimport cv2\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam, SGD\n\nfrom numpy.random import seed\nseed(42)\nfrom tensorflow import set_random_seed\nset_random_seed(2)\nfrom sklearn.metrics import classification_report, confusion_matrix\n\norigin_dir = 'SamTech/Vision/dataset/lesion/test/'\n# origin_dir = 'SamTech/Vision/dataset/lesion/train_2c/'\n\nsammy = SamTech.Vision.Classifier(category = 'lesion')\n# sammy.weight_path = 'SamTech/Vision/ckpt/'\nsammy.classes = ['BENIGN', 'MALIGNANT']\nsammy.classes.sort()\n\n\nsammy.load_model('inceptionResnetV2_lesion-320-320-3-c2-30-0.73.hdf5')\n\n\ntest_list = ['ML16','NM_small','ML17']\n# test_list = ['MALIGNANT','BENIGN']\n# test_list.pop(0)\ny_true = []\ny_pred = []\n\n\nfor folder in test_list:\n print ('x')\n _dir = origin_dir+folder\n for i in os.scandir(_dir):\n if 'NM' in folder:\n y_true.append(0)\n act = \"BENIGN\"\n elif 'ML' in folder:\n y_true.append(1)\n act = \"MELANOMAS\"\n\n raw_prediction = sammy.predict(_dir+'/'+i.name)[0]\n\n if raw_prediction[0]>=0.5:\n _class = 'BENIGN'\n y_pred.append(0)\n else:\n _class = 'MALIGNANT'\n y_pred.append(1)\n print ('BENIGN {}% MALIGNANT {}% {} , actually {}'.format(int(raw_prediction[0]*100),int(raw_prediction[1]*100),_class,act))\nprint (confusion_matrix(y_true,y_pred))\nprint (classification_report(y_true, y_pred, target_names=['BENIGN', 'MALIGNANT']))\n", "sub_path": "predict_boost.py", "file_name": "predict_boost.py", "file_ext": "py", "file_size_in_byte": 1539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.set_random_seed", "line_number": 10, "usage_type": "call"}, {"api_name": "SamTech.Vision.Classifier", "line_number": 16, "usage_type": "call"}, {"api_name": "SamTech.Vision", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.scandir", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 53, "usage_type": "call"}]}
+{"seq_id": "38174237", "text": "import pygame\n\n\nclass Scene:\n\n def __init__(self, screen, backgroundColor):\n self.screen = screen\n self.r = backgroundColor[0]\n self.g = backgroundColor[1]\n self.b = backgroundColor[2]\n self.gameObjects = []\n self.state = 0\n self.crazy = True\n self.rDirection = 1\n self.gDirection = 1\n self.bDirection = 1\n\n def add(self, game_object):\n self.gameObjects.append(game_object)\n\n def update(self, velocity):\n if self.crazy:\n self.process_background()\n for obj in self.gameObjects:\n obj.update(velocity, self)\n\n def draw(self):\n self.screen.fill((self.r, self.g, self.b))\n for obj in self.gameObjects:\n obj.draw(self.screen)\n\n def flip(self):\n pygame.display.flip()\n\n def process_background(self):\n self.r += 2 * self.rDirection\n self.g += 1 * self.gDirection\n self.b += 3 * self.bDirection\n\n if self.r >= 255:\n self.r = 255\n self.rDirection = -1\n elif self.r <= 0:\n self.r = 0\n self.rDirection = 1\n if self.g >= 255:\n self.g = 255\n self.gDirection = -1\n elif self.g <= 0:\n self.g = 0\n self.gDirection = 1\n if self.b >= 255:\n self.b = 255\n self.bDirection = -1\n elif self.b <= 0:\n self.b = 0\n self.bDirection = 1\n", "sub_path": "src/scene.py", "file_name": "scene.py", "file_ext": "py", "file_size_in_byte": 1466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.display.flip", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 33, "usage_type": "attribute"}]}
+{"seq_id": "30365789", "text": "import pytest\nfrom flask import url_for\n\nfrom trends.utils.feed_request import FeedRequest\n\n\ndef test_feed_request():\n params = {\"offset\": 0, \"limit\": 20, \"tag\": \"blogger\"}\n fr = FeedRequest()\n resp = fr.get_response(**params)\n assert resp.status_code == 200\n return resp\n\n\ndef test_feed_proxy(client):\n params = {\"offset\": 0, \"limit\": 20, \"tag\": \"blogger\"}\n resp = client.get(url_for(\"trends.feed_proxy\", **params),)\n assert resp.status_code == 200\n\n results = resp.json[\"data\"]\n assert len(results) == len(test_feed_request().json())\n # assert results['items'] == test_feed_request().json()['items'] # не могу\n # проверить, т.к. поля постоянно меняю свою очередность\n\n\n@pytest.mark.parametrize(\n (\"params\", \"status\"),\n [\n ({\"offset\": \"0\", \"limit\": \"20\", \"tag\": \"bla-bla\",}, 200), # nonexistent tag\n ({\"limit\": \"20\", \"tag\": \"blogger\",}, 200), # not offset\n ({\"offset\": \"0\", \"tag\": \"blogger\",}, 200), # not limit\n ({\"offset\": \"0\", \"limit\": \"20\",}, 200), # not tag\n ({\"offset\": \"1256985555\", \"limit\": \"20\", \"tag\": \"blogger\",}, 200),\n ],\n)\ndef test_feed_proxy_bad_request(client, params, status):\n resp = client.get(url_for(\"trends.feed_proxy\", **params),)\n assert resp.status_code == status\n", "sub_path": "backend/web/tests/api/test_feed_proxy.py", "file_name": "test_feed_proxy.py", "file_ext": "py", "file_size_in_byte": 1330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "trends.utils.feed_request.FeedRequest", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}]}
+{"seq_id": "203664804", "text": "from itertools import permutations\n\nword = input(\"Enter String : \")\nperms = [''.join(p) for p in permutations(word)]\n#print (perms)\nlist2 = []\noptions = ([x for x in input(\"Enter Value to list :\").split()])\n#print (options)\n\nfor i in options:\n if i in perms:\n list2.append(i)\n\nprint (list2)\n", "sub_path": "Exercises/Deepak/Collections and Iterations/p8.py", "file_name": "p8.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "itertools.permutations", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "307296796", "text": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2020, David Swarbrick.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Tests for image resizing based on filesize.\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\n\nimport unittest\nimport os\n\nfrom test import _common\nfrom test.helper import TestHelper\nfrom beets.util import syspath\nfrom beets.util.artresizer import (\n pil_resize,\n im_resize,\n get_im_version,\n get_pil_version,\n)\n\n\nclass ArtResizerFileSizeTest(_common.TestCase, TestHelper):\n \"\"\"Unittest test case for Art Resizer to a specific filesize.\"\"\"\n\n IMG_225x225 = os.path.join(_common.RSRC, b\"abbey.jpg\")\n IMG_225x225_SIZE = os.stat(syspath(IMG_225x225)).st_size\n\n def setUp(self):\n \"\"\"Called before each test, setting up beets.\"\"\"\n self.setup_beets()\n\n def tearDown(self):\n \"\"\"Called after each test, unloading all plugins.\"\"\"\n self.teardown_beets()\n\n def _test_img_resize(self, resize_func):\n \"\"\"Test resizing based on file size, given a resize_func.\"\"\"\n # Check quality setting unaffected by new parameter\n im_95_qual = resize_func(\n 225,\n self.IMG_225x225,\n quality=95,\n max_filesize=0,\n )\n # check valid path returned - max_filesize hasn't broken resize command\n self.assertExists(im_95_qual)\n\n # Attempt a lower filesize with same quality\n im_a = resize_func(\n 225,\n self.IMG_225x225,\n quality=95,\n max_filesize=0.9 * os.stat(syspath(im_95_qual)).st_size,\n )\n self.assertExists(im_a)\n # target size was achieved\n self.assertLess(os.stat(syspath(im_a)).st_size,\n os.stat(syspath(im_95_qual)).st_size)\n\n # Attempt with lower initial quality\n im_75_qual = resize_func(\n 225,\n self.IMG_225x225,\n quality=75,\n max_filesize=0,\n )\n self.assertExists(im_75_qual)\n\n im_b = resize_func(\n 225,\n self.IMG_225x225,\n quality=95,\n max_filesize=0.9 * os.stat(syspath(im_75_qual)).st_size,\n )\n self.assertExists(im_b)\n # Check high (initial) quality still gives a smaller filesize\n self.assertLess(os.stat(syspath(im_b)).st_size,\n os.stat(syspath(im_75_qual)).st_size)\n\n @unittest.skipUnless(get_pil_version(), \"PIL not available\")\n def test_pil_file_resize(self):\n \"\"\"Test PIL resize function is lowering file size.\"\"\"\n self._test_img_resize(pil_resize)\n\n @unittest.skipUnless(get_im_version(), \"ImageMagick not available\")\n def test_im_file_resize(self):\n \"\"\"Test IM resize function is lowering file size.\"\"\"\n self._test_img_resize(im_resize)\n\n\ndef suite():\n \"\"\"Run this suite of tests.\"\"\"\n return unittest.TestLoader().loadTestsFromName(__name__)\n\n\nif __name__ == \"__main__\":\n unittest.main(defaultTest=\"suite\")\n", "sub_path": "test/test_art_resize.py", "file_name": "test_art_resize.py", "file_ext": "py", "file_size_in_byte": 3566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": 
"test._common.TestCase", "line_number": 35, "usage_type": "attribute"}, {"api_name": "test._common", "line_number": 35, "usage_type": "name"}, {"api_name": "test.helper.TestHelper", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "test._common.RSRC", "line_number": 38, "usage_type": "attribute"}, {"api_name": "test._common", "line_number": 38, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 39, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 39, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 66, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 66, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 70, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 70, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 71, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 71, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 86, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 86, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 90, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 90, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 91, "usage_type": "call"}, {"api_name": "beets.util.syspath", "line_number": 91, "usage_type": "call"}, {"api_name": "beets.util.artresizer.pil_resize", "line_number": 96, "usage_type": "argument"}, {"api_name": "unittest.skipUnless", "line_number": 93, "usage_type": "call"}, {"api_name": "beets.util.artresizer.get_pil_version", "line_number": 93, "usage_type": "call"}, {"api_name": "beets.util.artresizer.im_resize", "line_number": 101, "usage_type": "argument"}, {"api_name": "unittest.skipUnless", "line_number": 98, "usage_type": "call"}, {"api_name": "beets.util.artresizer.get_im_version", "line_number": 98, "usage_type": "call"}, {"api_name": "unittest.TestLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 110, "usage_type": "call"}]}
+{"seq_id": "615800104", "text": "from flask import Flask, jsonify, request, render_template\nfrom flask_cors import CORS, cross_origin\nimport requests, json\n\napp = Flask(__name__)\ncors = CORS(app)\n\n# SORRY UMBRA PLEASE DONT HURT ME\n\n@app.route('/allsongs')\ndef all_songs():\n\tall_songs_url = \"https://www.scoresaber.com/api.php?function=get-leaderboards&cat=3&page=1&limit=1000\"\n\tr = requests.get(all_songs_url)\n\treturn r.text\n\t\n@app.route('/history')\ndef user():\n\tuser_id = request.args.get('user_id', '')\n\t\n\tuser_history_url = \"https://new.scoresaber.com/api/player/\"+user_id+\"/scores/top/\"\t\n\tuser_profile_url = \"https://new.scoresaber.com/api/player/\"+user_id+\"/full\"\n\tcurrent_page = 1\n\tdata = []\n\t\n\tlast_is_ranked = True\n\twhile last_is_ranked:\n\t\t# print(\"Fetching page\", current_page)\n\t\tr = requests.get(user_history_url+str(current_page))\n\t\tdata.extend(json.loads(r.text)[\"scores\"])\n\n\t\tif data[-1][\"pp\"] == 0:\n\t\t\tlast_is_ranked = False\n\n\t\tcurrent_page += 1\n\n\t# remove non-ranked elements, veeeery lazyly\n\tdata = list(filter(lambda x: x[\"pp\"] > 0, data))\n\taggregate_data = {}\n\taggregate_data[\"scores\"] = data\n\tr = requests.get(user_profile_url)\n\taggregate_data[\"profile\"] = json.loads(r.text)\n\n\treturn jsonify(aggregate_data)\n\n@app.route('/')\ndef home():\n\treturn render_template('index.html')\n\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=80)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "366396052", "text": "import json\n\nd = dict()\ncount=1\nwith open(\"infytq.txt\") as f:\n while(True):\n query = f.readline()\n print(query)\n if query != '':\n reply = f.readline()\n f.readline()\n d[count] = {\"query\":query,\"reply\":reply}\n count+=1\n else:\n break\nf = open(\"data.json\",\"w+\")\nf.write(json.dumps(d))\nf.close()\n", "sub_path": "anotherTry/query_reply.py", "file_name": "query_reply.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.dumps", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "25620921", "text": "from Acquisition import aq_inner\nfrom five import grok\nfrom plone import api\nfrom zope.component.hooks import getSite\n\nfrom pressapp.presscontent.pressinvitation import IPressInvitation\nfrom pressapp.presscontent.interfaces import IPressContent\n\n\nclass PrepareRelease(grok.View):\n grok.context(IPressContent)\n grok.require('cmf.ModifyPortalContent')\n grok.name('prepare-release')\n\n def update(self):\n self.recipient_count = len(self.recipient_list())\n self.has_recipients = self.recipient_count > 0\n self.subscriber_count = len(self.subscriber_list())\n self.has_subscribers = self.subscriber_count > 0\n\n def is_administrator(self):\n context = aq_inner(self.context)\n is_admin = False\n admin_roles = ('Site Administrator', 'Manager')\n user = api.user.get_current()\n roles = api.user.get_roles(username=user.getId(), obj=context)\n for role in roles:\n if role in admin_roles:\n is_admin = True\n return is_admin\n\n def is_pressinvitation(self):\n context = aq_inner(self.context)\n return IPressInvitation.providedBy(context)\n\n def recipient_list(self):\n context = aq_inner(self.context)\n recipients = getattr(context, 'recipients', '')\n return recipients\n\n def subscriber_list(self):\n portal = getSite()\n presscenter = portal['presscenter']\n subscribers = getattr(presscenter, 'subscribers', '')\n return subscribers\n\n def reformat_recipients(self, item):\n item = item.split(',', 1)\n return item\n\n def has_channel_info(self):\n context = aq_inner(self.context)\n channel = getattr(context, 'channel', None)\n if channel:\n return True\n\n def has_recipients_info(self):\n context = aq_inner(self.context)\n recipients = getattr(context, 'recipients', None)\n if recipients:\n return True\n", "sub_path": "src/pressapp.presscontent/pressapp/presscontent/preparerelease.py", "file_name": "preparerelease.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "five.grok.View", "line_number": 10, "usage_type": "attribute"}, {"api_name": "five.grok", "line_number": 10, "usage_type": "name"}, {"api_name": "five.grok.context", "line_number": 11, "usage_type": "call"}, {"api_name": "pressapp.presscontent.interfaces.IPressContent", "line_number": 11, "usage_type": "argument"}, {"api_name": "five.grok", "line_number": 11, "usage_type": "name"}, {"api_name": "five.grok.require", "line_number": 12, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 12, "usage_type": "name"}, {"api_name": "five.grok.name", "line_number": 13, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 13, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 22, "usage_type": "call"}, {"api_name": "plone.api.user.get_current", "line_number": 25, "usage_type": "call"}, {"api_name": "plone.api.user", "line_number": 25, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 25, "usage_type": "name"}, {"api_name": "plone.api.user.get_roles", "line_number": 26, "usage_type": "call"}, {"api_name": "plone.api.user", "line_number": 26, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 26, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 33, "usage_type": "call"}, {"api_name": "pressapp.presscontent.pressinvitation.IPressInvitation.providedBy", "line_number": 34, "usage_type": "call"}, {"api_name": "pressapp.presscontent.pressinvitation.IPressInvitation", "line_number": 34, 
"usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 37, "usage_type": "call"}, {"api_name": "zope.component.hooks.getSite", "line_number": 42, "usage_type": "call"}, {"api_name": "Acquisition.aq_inner", "line_number": 52, "usage_type": "call"}, {"api_name": "Acquisition.aq_inner", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "486789065", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib/python2.7/site-packages/weborg/lib/client.py\n# Compiled at: 2011-08-01 04:14:42\nimport subprocess, os, logging, re, memcache, settings\nlog = logging.getLogger('client')\nlog.setLevel(level=logging.DEBUG)\nhandler = logging.FileHandler(settings.LOG_FILE)\nhandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\nlog.addHandler(handler)\nmc = memcache.Client(['localhost:11211'])\n\ndef execute(cmd):\n env = os.environ\n env['LANG'] = 'en_US.utf8'\n full_cmd = settings.EMACS + ' -q -batch -l ~/.emacs.d/70-org-mode.el -l ' + settings.ORG_EL + \" -eval '%s'\" % cmd.encode('utf-8')\n p = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)\n stdout, stderr = p.communicate()\n log.debug('RPC: %s' % full_cmd)\n log.debug('Result: %s' % stdout)\n return stdout\n\n\ndef entry_index():\n idx = mc.get('idx')\n if idx:\n return idx\n else:\n cmd = '(entry-index)'\n idx = execute(cmd)\n mc.set('idx', idx)\n return idx\n\n\ndef entry_create(eid, jsonstr):\n cmd = '(entry-create \"%s\" \"%s\")' % (eid, re.escape(jsonstr))\n return execute(cmd)\n\n\ndef entry_new(eid):\n cmd = '(entry-new \"%s\")' % eid\n return execute(cmd)\n\n\ndef entry_update(eid, jsonstr):\n cmd = '(entry-update \"%s\" \"%s\")' % (eid, re.escape(jsonstr))\n return execute(cmd)\n\n\ndef entry_delete(eid):\n cmd = '(entry-delete \"%s\")' % eid\n return execute(cmd)\n\n\ndef entry_show(eid):\n cache = mc.get(str(eid))\n if cache:\n return cache\n else:\n cmd = '(entry-show \"%s\")' % eid\n result = execute(cmd)\n mc.set(str(eid), result)\n return result\n\n\ndef entry_edit(eid):\n cmd = '(entry-edit \"%s\")' % eid\n return execute(cmd)", "sub_path": "pycfiles/WebOrg-0.2.3dev.linux-i686.tar/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 10, "usage_type": "call"}, {"api_name": "settings.LOG_FILE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 11, "usage_type": "call"}, {"api_name": "memcache.Client", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "settings.EMACS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "settings.ORG_EL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 19, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "re.escape", "line_number": 38, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "612456646", "text": "import boto3\nfrom botocore.exceptions import ClientError\n\nclass DatabaseClient(object):\n def __init__(self, config):\n self._config = config\n self.connect(self._config)\n \n def connect(self, config):\n self.dynamodb = boto3.resource('dynamodb')\n \n def insert(self, table_name, data):\n table = self.dynamodb.Table(table_name)\n response = table.put_item(Item=data)\n return response\n\n def get(self, table_name, id):\n table = self.dynamodb.Table(table_name)\n scan_kwarg = {\n 'FilterExpression': 'user_id = :val',\n 'ExpressionAttributeValues': {':val': id}\n }\n done = False\n start_key = None\n rows = []\n i = 0\n while not done:\n i+=1\n if start_key:\n scan_kwarg['ExclusiveStartKey'] = start_key\n response = table.scan(**scan_kwarg)\n rows = rows + response.get('Items', [])\n start_key = response.get('LastEvaulatedKey', None)\n done = start_key is None\n return rows\n \n def delete_items(self, ids):\n response = [delete_item(id) for id in ids]\n return response\n\n def delete_item(table_name, id):\n table = self.dynamodb.Table(table_name)\n \n try:\n table.delete_item(\n Key = {\n 'id': id\n }\n )\n except ClientError as e:\n print(\"Error: \")\n print(e)\n return False\n else:\n return response", "sub_path": "backend/dynamodb/build/lib/dynamodb/database_client.py", "file_name": "database_client.py", "file_ext": "py", "file_size_in_byte": 1558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "boto3.resource", "line_number": 10, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "168393493", "text": "import pygame.ftfont\nimport pygame\npygame.init()\npygame.mixer.init()\n\nclass Music_button:\n def __init__(self, screen):\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.rect = pygame.Rect(0, 600, 67, 71)\n self.image=pygame.image.load('images/Sound.png')\n self.BGM_music=pygame.mixer.music.load(\"music/BGM.mp3\")\n self.Music_play=True\n self.music_continuous=True\n\n def music_play(self):\n if self.Music_play==True and self.music_continuous==True:\n pygame.mixer.music.play()\n self.music_continuous=False\n\n def Music_button_play(self):\n if self.Music_play==False:\n self.Music_play=True\n self.music_continuous=True\n else:\n self.Music_play=False\n pygame.mixer_music.pause()\n\n def blit(self):\n self.screen.blit(self.image,self.rect)\n\n", "sub_path": "GAME/music_button.py", "file_name": "music_button.py", "file_ext": "py", "file_size_in_byte": 893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mixer_music.pause", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.mixer_music", "line_number": 27, "usage_type": "attribute"}]}
+{"seq_id": "381817922", "text": "''' create_unique_drug_order_txt.py '''\n\nimport sqlalchemy\nfrom db import Db\nfrom dotenv import load_dotenv\nfrom os.path import join, dirname, os\nfrom sqlalchemy.sql import select\n\n\n## load environment values from .env\ndotenv_path = join(dirname(__file__), '../.env')\nload_dotenv(dotenv_path)\n\n## connect to postgres\ndb = Db()\nconnection = db.connect_postgres()\nconn = connection[0]\nmeta = connection[1]\n\nmediSpanTable = meta.tables['medispan_uniquegpi']\ns = select([\n mediSpanTable.c.drug_name,\n]) \\\n.order_by(mediSpanTable.c.drug_name) \\\n.distinct()\n\nDRUG_FILE = os.getenv(\"DRUG_LIST\")\nf = open(DRUG_FILE, \"w\")\n\nresult = conn.execute(s)\nfor drugName in result:\n drug = drugName[0].replace('\"', '')\n f.write('\"' + drug + '\",')\n\nf.close()\n", "sub_path": "pharma/deprecated/create_unique_drug_order_txt.py", "file_name": "create_unique_drug_order_txt.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 12, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 15, "usage_type": "call"}, {"api_name": "db.connect_postgres", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.select", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.os", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "55878360", "text": "import json\nimport os\nimport sys\nimport pytest\nimport pep8\n\nsys.path.insert(0, os.path.abspath(__file__ + \"/../..\"))\n\nfrom jsane import loads, dumps, JSaneException, from_dict\nfrom jsane.traversable import Traversable\n\n\nclass TestClass:\n @pytest.fixture(autouse=True)\n def create_data(self):\n self.json1 = \"\"\"\n {\n \"r\": \"yo\",\n \"key_1\": \"value_1\",\n \"key_2\": {\n \"key_21\": [\n [2100, 2101],\n [2110, 2111]\n ],\n \"key_22\": [\"l1\", \"l2\"],\n \"key_23\": {\"key_231\":\"v\"},\n \"key_24\": {\n \"key_241\": 502,\n \"key_242\": [\n [5, 0],\n [7, 0]\n ],\n \"key_243\": {\n \"key_2431\": [0, 0],\n \"key_2432\": 504,\n \"key_2433\": [\n [11451, 0],\n [11452, 0]\n ]\n },\n \"key_244\": {\n \"key_2441\": {\n \"key_24411\": {\n \"key_244111\": \"v_24411\",\n \"key_244112\": [[5549, 0]]\n },\n \"key_24412\": \"v_24412\"\n },\n \"key_2442\": [\"ll1\", \"ll2\"]\n }\n }\n }\n }\n \"\"\"\n self.dict1 = {\"foo\": \"bar\"}\n\n def test_wrapper(self):\n assert loads(dumps(self.dict1)).r() == self.dict1\n assert json.dumps(self.dict1) == dumps(self.dict1)\n assert self.dict1[\"foo\"] == from_dict(self.dict1).foo.r()\n assert loads(dumps(self.dict1)), Traversable(self.dict1)\n\n def test_access(self):\n j = loads(self.json1)\n assert j.key_1.r() == \"value_1\"\n assert j[\"r\"].r() == \"yo\"\n assert j.key_2.key_21[1][1].r() == 2111\n\n def test_exception(self):\n j = loads(self.json1)\n with pytest.raises(JSaneException):\n j.key_2.nonexistent[0].r()\n with pytest.raises(JSaneException):\n j.key_2.key_21[7].r()\n with pytest.raises(JSaneException):\n j.key_1.key_2.r()\n with pytest.raises(IndexError):\n j.key_2.key_24.key_244.key_2442[0].r()[7]\n with pytest.raises(JSaneException):\n j.key_2.key_24.key_244.key_2442[0][7].r()\n\n def test_default(self):\n j = loads(self.json1)\n assert j.key_1.key_2.r(None) is None\n assert j.key_2.nonexistent[0].r(\"default\") == \"default\"\n assert j.key_2.key_21[7].r(\"default\") == \"default\"\n with pytest.raises(IndexError):\n j.key_2.key_24.key_244.key_2442[0].r(\"default\")[7]\n\n def test_resolution(self):\n j = loads(self.json1)\n assert j.key_2.key_21[0].r() == [2100, 2101]\n assert j.key_2.key_21[0].r() == [2100, 2101]\n assert j.key_2.key_24.key_244.key_2442[0].r()[0] == \"l\"\n\n def test_pep8(self):\n pep8style = pep8.StyleGuide([['statistics', True],\n ['show-sources', True],\n ['repeat', True],\n ['ignore', \"E501\"],\n ['paths', [os.path.dirname(\n os.path.abspath(__file__))]]],\n parse_argv=False)\n report = pep8style.check_files()\n assert report.total_errors == 0\n", "sub_path": "tests/test_jsane.py", "file_name": "test_jsane.py", "file_ext": "py", "file_size_in_byte": 3381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "jsane.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "jsane.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "jsane.from_dict", "line_number": 60, "usage_type": "call"}, 
{"api_name": "jsane.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "jsane.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "jsane.traversable.Traversable", "line_number": 61, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 64, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 70, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 71, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 71, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 73, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 73, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 75, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 75, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 79, "usage_type": "call"}, {"api_name": "jsane.JSaneException", "line_number": 79, "usage_type": "argument"}, {"api_name": "jsane.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 87, "usage_type": "call"}, {"api_name": "jsane.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "pep8.StyleGuide", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}]}
+{"seq_id": "516059555", "text": "from django.core.urlresolvers import reverse_lazy\nfrom django.db import transaction\nfrom django.views.generic import CreateView, UpdateView, DeleteView, ListView\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom .models import Pazar,Bolum,Reyon,UrunAdi,UrunTipi,UrunOzellikleri\nfrom .forms import PazarForm,BolumForm,ReyonForm,UrunTipiForm,UrunAdiForm,UrunOzellikleriForm\n\n\ndef PazarList(request): \n pazarlar=Pazar.objects.all()\n context={\n 'pazarlar':pazarlar,\n }\n return render(request,'main/pazar_list.html',context)\n\ndef PazarDetail(request,id):\n pazar=get_object_or_404(Pazar,id=id)\n bolum=Bolum.objects.all().filter(pazar_id=id)\n reyon=Reyon.objects.all().filter(pazar_id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'pazar':pazar,\n 'bolum':bolum, \n 'reyon':reyon,\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail.html',context)\n\ndef BolumDetail(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n reyon=Reyon.objects.all().filter(pazar_id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'bolum':bolum, \n 'reyon':reyon,\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-bolum.html',context)\n\ndef ReyonDetail(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'reyon':reyon,\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-reyon.html',context)\ndef UrunTipiDetail(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'urunadi':urunadi,\n 'uruntipi':uruntipi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-uruntipi.html',context)\ndef UrunAdiDetail(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n urunozellikleri=UrunOzellikleri.objects.all().filter(pazar_id=id)\n context = {\n 'urunadi':urunadi,\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-urunadi.html',context)\ndef UrunOzellikDetail(request,id):\n urunozellikleri=get_object_or_404(UrunOzellikleri,id=id)\n context = {\n 'urunozellikleri':urunozellikleri,\n }\n return render(request,'main/detail-urunozellikleri.html',context)\n\ndef PazarCreate(request):\n if request.method == \"POST\":\n form = PazarForm(request.POST)\n if form.is_valid():\n pazar = form.save(commit=False)\n pazar.save()\n return redirect('pazar-list')\n else:\n form = PazarForm()\n return render(request, 'main/create.html', {'form': form})\ndef BolumCreate(request,id):\n if request.method == \"POST\":\n form = BolumForm(request.POST)\n if form.is_valid():\n bolum = form.save(commit=False)\n bolum.pazar_id=id\n bolum.save()\n return redirect('pazar-list')\n else:\n form = BolumForm()\n return render(request, 'main/create.html', {'form': form})\ndef ReyonCreate(request,id):\n 
bolum=get_object_or_404(Bolum,id=id)\n if request.method == \"POST\":\n form = ReyonForm(request.POST)\n if form.is_valid():\n reyon = form.save(commit=False)\n reyon.pazar_id=bolum.pazar_id\n reyon.bolum_id=id\n reyon.save()\n return redirect('pazar-list')\n else:\n form = ReyonForm()\n return render(request, 'main/create.html', {'form': form})\ndef UrunTipiCreate(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n if request.method == \"POST\":\n form = UrunTipiForm(request.POST)\n if form.is_valid():\n uruntipi = form.save(commit=False)\n uruntipi.pazar_id=reyon.pazar_id\n uruntipi.reyon_id=id\n uruntipi.save()\n return redirect('pazar-list')\n else:\n form = UrunTipiForm()\n return render(request, 'main/create.html', {'form': form})\ndef UrunAdiCreate(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id) \n if request.method == \"POST\":\n form = UrunAdiForm(request.POST)\n if form.is_valid():\n urunadi = form.save(commit=False)\n urunadi.pazar_id=uruntipi.pazar_id\n urunadi.urunTipi_id=id\n urunadi.save()\n return redirect('pazar-list')\n else:\n form = UrunAdiForm()\n return render(request, 'main/create.html', {'form': form})\ndef UrunOzellikCreate(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n \n if request.method == \"POST\":\n form = UrunOzellikleriForm(request.POST)\n if form.is_valid():\n urunozellik = form.save(commit=False)\n urunozellik.pazar_id=urunadi.pazar_id\n urunozellik.urunadi_id=id\n urunozellik.stok=5\n urunozellik.save()\n return redirect('pazar-list')\n else:\n form = UrunOzellikleriForm()\n return render(request, 'main/create.html', {'form': form})\n\ndef PazarUpdate(request,id):\n pazar=get_object_or_404(Pazar,id=id)\n bolum=Bolum.objects.all().filter(pazar_id=id)\n reyon=Reyon.objects.all().filter(pazar_id=id)\n uruntipi=UrunTipi.objects.all().filter(pazar_id=id)\n urunadi=UrunAdi.objects.all().filter(pazar_id=id)\n urunoz=UrunOzellikleri.objects.all().filter(pazar_id=id)\n root_id=id\n form=PazarForm(request.POST or None,instance=pazar)\n if form.is_valid():\n pazar=form.save(commit=False)\n pazar.save()\n \n context = {\n 'root_id':root_id,\n 'form':form,\n 'bolum':bolum,\n 'reyon':reyon,\n 'uruntipi':uruntipi,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n\n return render(request,'main/update.html',context)\ndef BolumUpdate(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n reyon=Reyon.objects.all().filter(bolum_id=bolum.id)\n uruntipi=UrunTipi.objects.all().filter(reyon_id=reyon.first().id)\n urunadi=UrunAdi.objects.all().filter(urunTipi_id=uruntipi.first().id)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.first().id)\n\n form=BolumForm(request.POST or None,instance=bolum)\n\n if form.is_valid():\n bolum=form.save(commit=False)\n bolum.save()\n context = {\n 'form':form,\n 'reyon':reyon,\n 'uruntipi':uruntipi,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-bolum.html',context)\ndef ReyonUpdate(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n form=ReyonForm(request.POST or None,instance=reyon)\n uruntipi=UrunTipi.objects.all().filter(reyon_id=reyon.id)\n urunadi=UrunAdi.objects.all().filter(urunTipi_id=uruntipi.first().id)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.first().id)\n\n if form.is_valid():\n reyon=form.save(commit=False)\n reyon.save()\n context = {\n 'form':form,\n 'uruntipi':uruntipi,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-reyon.html',context)\ndef UrunTipiUpdate(request,id):\n 
uruntipi=get_object_or_404(UrunTipi,id=id)\n form=UrunTipiForm(request.POST or None,instance=uruntipi)\n urunadi=UrunAdi.objects.all().filter(urunTipi_id=uruntipi.id)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.first().id)\n\n if form.is_valid():\n uruntipi=form.save(commit=False)\n uruntipi.save()\n context = {\n 'form':form,\n 'urunadi':urunadi,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-uruntipi.html',context)\ndef UrunAdiUpdate(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n form=UrunAdiForm(request.POST or None,instance=urunadi)\n urunoz=UrunOzellikleri.objects.all().filter(urunadi_id=urunadi.id)\n\n if form.is_valid():\n urunadi=form.save(commit=False)\n urunadi.save()\n context = {\n 'form':form,\n 'urunoz':urunoz,\n }\n return render(request,'main/update-urunadi.html',context)\ndef UrunOzellikUpdate(request,id):\n urunoz=get_object_or_404(UrunOzellikleri,id=id)\n form=UrunOzellikleriForm(request.POST or None,instance=urunoz)\n if form.is_valid():\n urunoz=form.save(commit=False)\n urunoz.save()\n context = {\n 'form':form,\n }\n return render(request,'main/update-urunozellik.html',context)\n\ndef PazarDelete(request,id):\n pazar=get_object_or_404(Pazar,id=id)\n pazar.delete()\n return redirect(\"pazar-list\")\ndef BolumDelete(request,id):\n bolum=get_object_or_404(Bolum,id=id)\n bolum.delete()\n return redirect(\"pazar-list\")\ndef ReyonDelete(request,id):\n reyon=get_object_or_404(Reyon,id=id)\n reyon.delete()\n return redirect(\"pazar-list\")\ndef UrunTipiDelete(request,id):\n uruntipi=get_object_or_404(UrunTipi,id=id)\n uruntipi.delete()\n return redirect(\"pazar-list\")\ndef UrunAdiDelete(request,id):\n urunadi=get_object_or_404(UrunAdi,id=id)\n urunadi.delete()\n return redirect(\"pazar-list\")\ndef UrunOzellikleriDelete(request,id):\n urunoz=get_object_or_404(UrunOzellikleri,id=id)\n urunoz.delete()\n return redirect(\"pazar-list\")\n\n\n", "sub_path": "main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "models.Pazar.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Pazar.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Pazar", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Pazar", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.Bolum.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Bolum.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Bolum", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Reyon.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 19, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 20, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": 
"models.UrunTipi", "line_number": 21, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 34, "usage_type": "argument"}, {"api_name": "models.Reyon.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 35, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 36, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 36, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 37, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 49, "usage_type": "argument"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 50, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 50, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 51, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 61, "usage_type": "argument"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 62, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 62, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 63, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 63, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 71, "usage_type": "call"}, {"api_name": 
"models.UrunAdi", "line_number": 71, "usage_type": "argument"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 72, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 79, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri", "line_number": 79, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "forms.PazarForm", "line_number": 87, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "forms.PazarForm", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 94, "usage_type": "call"}, {"api_name": "forms.BolumForm", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "forms.BolumForm", "line_number": 104, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 107, "usage_type": "argument"}, {"api_name": "forms.ReyonForm", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 115, "usage_type": "call"}, {"api_name": "forms.ReyonForm", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 120, "usage_type": "argument"}, {"api_name": "forms.UrunTipiForm", "line_number": 122, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 128, "usage_type": "call"}, {"api_name": "forms.UrunTipiForm", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 133, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 133, "usage_type": "argument"}, {"api_name": "forms.UrunAdiForm", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 141, "usage_type": "call"}, {"api_name": "forms.UrunAdiForm", "line_number": 143, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 144, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 146, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 146, "usage_type": "argument"}, {"api_name": "forms.UrunOzellikleriForm", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 156, "usage_type": "call"}, {"api_name": "forms.UrunOzellikleriForm", "line_number": 158, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 159, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Pazar", "line_number": 162, "usage_type": "argument"}, {"api_name": "models.Bolum.objects.all", "line_number": 163, "usage_type": "call"}, 
{"api_name": "models.Bolum.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "models.Bolum", "line_number": 163, "usage_type": "name"}, {"api_name": "models.Reyon.objects.all", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 164, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 165, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 165, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 166, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 166, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 167, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 167, "usage_type": "name"}, {"api_name": "forms.PazarForm", "line_number": 169, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 184, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 186, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 186, "usage_type": "argument"}, {"api_name": "models.Reyon.objects.all", "line_number": 187, "usage_type": "call"}, {"api_name": "models.Reyon.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.Reyon", "line_number": 187, "usage_type": "name"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 188, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 188, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 188, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 189, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 189, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 189, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 190, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 190, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 190, "usage_type": "name"}, {"api_name": "forms.BolumForm", "line_number": 192, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 204, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 206, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 206, "usage_type": "argument"}, {"api_name": "forms.ReyonForm", "line_number": 207, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects.all", "line_number": 208, "usage_type": "call"}, {"api_name": "models.UrunTipi.objects", "line_number": 208, "usage_type": "attribute"}, {"api_name": "models.UrunTipi", "line_number": 208, "usage_type": "name"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 209, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 209, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 210, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", 
"line_number": 210, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 210, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 221, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 223, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 223, "usage_type": "argument"}, {"api_name": "forms.UrunTipiForm", "line_number": 224, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects.all", "line_number": 225, "usage_type": "call"}, {"api_name": "models.UrunAdi.objects", "line_number": 225, "usage_type": "attribute"}, {"api_name": "models.UrunAdi", "line_number": 225, "usage_type": "name"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 226, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 226, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 236, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 238, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 238, "usage_type": "argument"}, {"api_name": "forms.UrunAdiForm", "line_number": 239, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects.all", "line_number": 240, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri.objects", "line_number": 240, "usage_type": "attribute"}, {"api_name": "models.UrunOzellikleri", "line_number": 240, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 249, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 251, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri", "line_number": 251, "usage_type": "argument"}, {"api_name": "forms.UrunOzellikleriForm", "line_number": 252, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 259, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 262, "usage_type": "call"}, {"api_name": "models.Pazar", "line_number": 262, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 264, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 266, "usage_type": "call"}, {"api_name": "models.Bolum", "line_number": 266, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 268, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 270, "usage_type": "call"}, {"api_name": "models.Reyon", "line_number": 270, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 272, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 274, "usage_type": "call"}, {"api_name": "models.UrunTipi", "line_number": 274, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 276, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 278, "usage_type": "call"}, {"api_name": "models.UrunAdi", "line_number": 278, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 280, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 282, "usage_type": "call"}, {"api_name": "models.UrunOzellikleri", "line_number": 282, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 284, "usage_type": 
"call"}]}
+{"seq_id": "513457165", "text": "from aiohttp import web\nfrom functools import partial\nimport asyncio\nimport aiofiles\nimport datetime\nimport os\nimport logging\nimport argparse\n\n\nasync def archivate(delay, path_to_dir, request):\n    response = web.StreamResponse()\n\n    archive_hash = request.match_info.get('archive_hash')\n\n    archive_path = os.path.abspath(f'{path_to_dir}/{archive_hash}')\n\n    # If the directory does not exist, return 404 Not Found\n    if not os.path.exists(archive_path):\n        raise web.HTTPNotFound()\n\n    response.headers['Content-Type'] = 'application/zip'\n    response.headers['Content-Disposition'] = f'attachment; filename=\"{archive_hash}.zip\"'\n    await response.prepare(request)\n\n    cmd = ['zip', '-r', '-', archive_path]\n    process = await asyncio.create_subprocess_exec(\n        *cmd,\n        stdout=asyncio.subprocess.PIPE,\n        stderr=asyncio.subprocess.PIPE\n    )\n    try:\n        while True:\n            data = await process.stdout.read(100000)\n            if not data:\n                break\n\n            logger.debug('Sending archive chunk ...')\n            await response.write(data)\n            await asyncio.sleep(delay)\n\n    except asyncio.CancelledError:\n        logger.debug('Download was interrupted')\n        process.kill()\n        await process.communicate()\n        raise\n    finally:\n        response.force_close()\n\n    return response\n\n\nasync def handle_index_page(request):\n    async with aiofiles.open('index.html', mode='r') as index_file:\n        index_contents = await index_file.read()\n    return web.Response(text=index_contents, content_type='text/html')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Microservice for downloading archives.'\n    )\n    parser.add_argument('-l', '--logger', help='Enable logger',\n                        action='store_true')\n    parser.add_argument('-d', '--delay', default=1, type=int, help='Set delay')\n    parser.add_argument('-p', '--path', default='./test_photos', type=str,\n                        help='Specify the path to directory')\n    args = parser.parse_args()\n\n    logging.basicConfig(level=logging.DEBUG)\n    logger = logging.getLogger('Logger')\n    logger.disabled = not args.logger\n\n    app = web.Application()\n    app.add_routes([\n        web.get('/', handle_index_page),\n        web.get('/archive/{archive_hash}/', partial(archivate, args.delay, args.path)),\n    ])\n    web.run_app(app)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "aiohttp.web.StreamResponse", "line_number": 12, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "aiohttp.web.HTTPNotFound", "line_number": 20, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 20, "usage_type": "name"}, {"api_name": "asyncio.create_subprocess_exec", "line_number": 27, "usage_type": "call"}, {"api_name": "asyncio.subprocess", "line_number": 29, "usage_type": "attribute"}, {"api_name": "asyncio.subprocess", "line_number": 30, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 42, "usage_type": "attribute"}, {"api_name": "aiofiles.open", "line_number": 54, "usage_type": "call"}, {"api_name": "aiohttp.web.Response", 
"line_number": 56, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 56, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 70, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 71, "usage_type": "call"}, {"api_name": "aiohttp.web.Application", "line_number": 74, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 74, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 76, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 76, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 77, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 77, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 77, "usage_type": "call"}, {"api_name": "aiohttp.web.run_app", "line_number": 79, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 79, "usage_type": "name"}]}
+{"seq_id": "162086703", "text": "import cv2\n\ncameraCapture = cv2.VideoCapture(0)\nfps = 30 # assumption\nsize = (int(cameraCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),\n        int(cameraCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\nvideoWriter = cv2.VideoWriter(\n    'img/output2.avi', cv2.VideoWriter_fourcc('I', '4', '2', '0'),\n    fps, size\n)\n\nsuccess, frame = cameraCapture.read()\nnumFrameRemaining = 10 * fps - 1\n\nwhile success and numFrameRemaining > 0:\n    videoWriter.write(frame)\n    success, frame = cameraCapture.read()\n    numFrameRemaining -= 1\n\ncameraCapture.release()", "sub_path": "learning-opencv-computer-vision-book/handling-files-camera-and-gui/basic-io/capturing-camer-frames.py", "file_name": "capturing-camer-frames.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.VideoCapture", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "545286609", "text": "# TENSORFLOW 2.0\nimport glob\nimport pickle\nimport numpy\nfrom music21 import converter, instrument, note, chord\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Bidirectional, LSTM, concatenate, Input\nfrom tensorflow.keras.layers import BatchNormalization as BatchNorm\nimport tensorflow.keras.utils as np_utils\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import Model\n\ndef train_network():\n\t\"\"\" Train a Neural Network to generate music \"\"\"\n\tnotes, offsets, durations = get_notes()\n\n\tn_vocab_notes = len(set(notes))\n\tnetwork_input_notes, network_output_notes = prepare_sequences(notes, n_vocab_notes)\n\t\n\tn_vocab_offsets = len(set(offsets))\n\tnetwork_input_offsets, network_output_offsets = prepare_sequences(offsets, n_vocab_offsets)\n\t\n\tn_vocab_durations = len(set(durations))\n\tnetwork_input_durations, network_output_durations = prepare_sequences(durations, n_vocab_durations)\n\tmodel = create_network(network_input_notes, n_vocab_notes, network_input_offsets, n_vocab_offsets, network_input_durations, n_vocab_durations)\n\ttrain(model, network_input_notes, network_input_offsets, network_input_durations, network_output_notes, network_output_offsets, network_output_durations)\n\ndef get_notes():\n\t\"\"\" Get all the notes and chords from the midi files in the ./classical-piano-type0 directory \"\"\"\n\tnotes = []\n\toffsets = []\n\tdurations = []\n\n\tfor file in glob.glob(\"classical-piano-type0/*.mid\"):\n\t\tmidi = converter.parse(file)\n\n\t\tprint(\"Parsing %s\" % file)\n\n\t\tnotes_to_parse = None\n\n\t\ttry: # file has instrument parts\n\t\t\ts2 = instrument.partitionByInstrument(midi)\n\t\t\tnotes_to_parse = s2.parts[0].recurse() \n\t\texcept Exception: # file has notes in a flat structure\n\t\t\tnotes_to_parse = midi.flat.notes\n\t\t\n\t\t\n\t\toffsetBase = 0\n\t\tfor element in notes_to_parse:\n\t\t\tisNoteOrChord = False\n\t\t\t\n\t\t\tif isinstance(element, note.Note):\n\t\t\t\tnotes.append(str(element.pitch))\n\t\t\t\tisNoteOrChord = True\n\t\t\telif isinstance(element, chord.Chord):\n\t\t\t\tnotes.append('.'.join(str(n) for n in element.normalOrder))\n\t\t\t\tisNoteOrChord = True\n\t\t\t\n\t\t\tif isNoteOrChord:\n\t\t\t\toffsets.append(str(element.offset - offsetBase))\n\t\t\t\tdurations.append(str(element.duration.quarterLength))\n\t\t\t\tisNoteOrChord = False\n\t\t\t\toffsetBase = element.offset\n\t\t\t\t\n\n\twith open('data/notes', 'wb') as filepath:\n\t\tpickle.dump(notes, filepath)\n\t\n\twith open('data/durations', 'wb') as filepath:\n\t\tpickle.dump(durations, filepath)\n\t\t\n\twith open('data/offsets', 'wb') as filepath:\n\t\tpickle.dump(offsets, filepath)\n\t\n\tprint(durations)\n\treturn notes, offsets, durations\n\ndef prepare_sequences(notes, n_vocab):\n\t\"\"\" Prepare the sequences used by the Neural Network \"\"\"\n\tsequence_length = 100\n\n\t# get all pitch names\n\tpitchnames = sorted(set(item for item in notes))\n\n\t# create a dictionary to map pitches to integers\n\tnote_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n\n\tnetwork_input = []\n\tnetwork_output = []\n\n\t# create input sequences and the corresponding outputs\n\tfor i in range(0, len(notes) - sequence_length, 1):\n\t\tsequence_in = notes[i:i + sequence_length]\n\t\tsequence_out = notes[i + sequence_length]\n\t\tnetwork_input.append([note_to_int[char] for char in
 sequence_in])\n\t\tnetwork_output.append(note_to_int[sequence_out])\n\n\tn_patterns = len(network_input)\n\n\t# reshape the input into a format compatible with LSTM layers\n\tnetwork_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))\n\t# normalize input\n\tnetwork_input = network_input / float(n_vocab)\n\n\tnetwork_output = np_utils.to_categorical(network_output)\n\n\treturn (network_input, network_output)\n\ndef create_network(network_input_notes, n_vocab_notes, network_input_offsets, n_vocab_offsets, network_input_durations, n_vocab_durations):\n\t\n\t# Branch of the network that considers notes\n\tinputNotesLayer = Input(shape=(network_input_notes.shape[1], network_input_notes.shape[2]))\n\tinputNotes = LSTM(\n\t\t256,\n\t\tinput_shape=(network_input_notes.shape[1], network_input_notes.shape[2]),\n\t\treturn_sequences=True\n\t)(inputNotesLayer)\n\tinputNotes = Dropout(0.2)(inputNotes)\n\t\n\t# Branch of the network that considers note offset\n\tinputOffsetsLayer = Input(shape=(network_input_offsets.shape[1], network_input_offsets.shape[2]))\n\tinputOffsets = LSTM(\n\t\t256,\n\t\tinput_shape=(network_input_offsets.shape[1], network_input_offsets.shape[2]),\n\t\treturn_sequences=True\n\t)(inputOffsetsLayer)\n\tinputOffsets = Dropout(0.2)(inputOffsets)\n\t\n\t# Branch of the network that considers note duration\n\tinputDurationsLayer = Input(shape=(network_input_durations.shape[1], network_input_durations.shape[2]))\n\tinputDurations = LSTM(\n\t\t256,\n\t\tinput_shape=(network_input_durations.shape[1], network_input_durations.shape[2]),\n\t\treturn_sequences=True\n\t)(inputDurationsLayer)\n\t#inputDurations = Dropout(0.3)(inputDurations)\n\tinputDurations = Dropout(0.2)(inputDurations)\n\t\n\t# Concatenate the three input networks together into one branch\n\tinputs = concatenate([inputNotes, inputOffsets, inputDurations])\n\t\n\t# A cheeky LSTM to consider everything learnt from the three separate branches\n\tx = LSTM(512, return_sequences=True)(inputs)\n\tx = Dropout(0.3)(x)\n\tx = LSTM(512)(x)\n\tx = BatchNorm()(x)\n\tx = Dropout(0.3)(x)\n\tx = Dense(256, activation='relu')(x)\n\t\n\t# Time to split into three branches again...\n\t\n\t# Branch of the network that classifies the note\n\toutputNotes = Dense(128, activation='relu')(x)\n\toutputNotes = BatchNorm()(outputNotes)\n\toutputNotes = Dropout(0.3)(outputNotes)\n\toutputNotes = Dense(n_vocab_notes, activation='softmax', name=\"Note\")(outputNotes)\n\t\n\t# Branch of the network that classifies the note offset\n\toutputOffsets = Dense(128, activation='relu')(x)\n\toutputOffsets = BatchNorm()(outputOffsets)\n\toutputOffsets = Dropout(0.3)(outputOffsets)\n\toutputOffsets = Dense(n_vocab_offsets, activation='softmax', name=\"Offset\")(outputOffsets)\n\t\n\t# Branch of the network that classifies the note duration\n\toutputDurations = Dense(128, activation='relu')(x)\n\toutputDurations = BatchNorm()(outputDurations)\n\toutputDurations = Dropout(0.3)(outputDurations)\n\toutputDurations = Dense(n_vocab_durations, activation='softmax', name=\"Duration\")(outputDurations)\n\t\n\t# Tell Keras what our inputs and outputs are \n\tmodel = Model(inputs=[inputNotesLayer, inputOffsetsLayer, inputDurationsLayer], outputs=[outputNotes, outputOffsets, outputDurations])\n\t\n\t# Adam seems to be faster than RMSProp and learns better too \n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\t# Useful to try RMSProp though\n\t\n\t# LOAD WEIGHTS HERE IF YOU WANT TO CONTINUE 
TRAINING!\n\t#model.load_weights(weights_name)\n\n\treturn model\n\ndef train(model, network_input_notes, network_input_offsets, network_input_durations, network_output_notes, network_output_offsets, network_output_durations):\n\t\"\"\" train the neural network \"\"\"\n\tfilepath = \"weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5\"\n\tcheckpoint = ModelCheckpoint(\n\t\tfilepath,\n\t\tmonitor='loss',\n\t\tverbose=0,\n\t\tsave_best_only=True,\n\t\tmode='min'\n\t)\n\tcallbacks_list = [checkpoint]\n\n\tmodel.fit([network_input_notes, network_input_offsets, network_input_durations], [network_output_notes, network_output_offsets, network_output_durations], epochs=1000, batch_size=64, callbacks=callbacks_list, verbose=1)\n\nif __name__ == '__main__':\n\t#weights_name = 'weights-improvement-41-0.9199-bigger.hdf5'\n\ttrain_network()\n", "sub_path": "lstm-new-tf2.py", "file_name": "lstm-new-tf2.py", "file_ext": "py", "file_size_in_byte": 7377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "glob.glob", "line_number": 35, "usage_type": "call"}, {"api_name": "music21.converter.parse", "line_number": 36, "usage_type": "call"}, {"api_name": "music21.converter", "line_number": 36, "usage_type": "name"}, {"api_name": "music21.instrument.partitionByInstrument", "line_number": 43, "usage_type": "call"}, {"api_name": "music21.instrument", "line_number": 43, "usage_type": "name"}, {"api_name": "music21.note.Note", "line_number": 53, "usage_type": "attribute"}, {"api_name": "music21.note", "line_number": 53, "usage_type": "name"}, {"api_name": "music21.chord.Chord", "line_number": 56, "usage_type": "attribute"}, {"api_name": "music21.chord", "line_number": 56, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 68, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 71, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 74, "usage_type": "call"}, {"api_name": "music21.note", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils", "line_number": 106, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.concatenate", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 147, 
"usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.keras.Model", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 186, "usage_type": "call"}]}
+{"seq_id": "46852250", "text": "import uuid\nimport time\nimport datetime\nfrom binascii import crc32\nfrom typing import Iterable\nfrom marshmallow import fields\nimport pytz\nfrom werkzeug.datastructures import ImmutableMultiDict\n\n\nclass TimeCased(fields.Field):\n    def _serialize(self, value, attr, obj, **kwargs):\n        if value is None:\n            return \"\"\n        shanghai = pytz.timezone(\"Asia/Shanghai\")\n        return pytz.utc.localize(value).astimezone(shanghai).strftime('%Y-%m-%d %H:%M:%S')\n\n    def _deserialize(self, value, attr, data, **kwargs):\n        return value\n\n\ndef create_uuid_and_crc32():\n    uuid_ = str(uuid.uuid4())\n    uuid_crc32 = crc32(uuid_.encode())\n    return uuid_, uuid_crc32\n\n\ndef get_crc32(string):\n    return crc32(string.encode())\n\n\ndef localtime_to_utctime(localtime: datetime.datetime) -> datetime.datetime:\n    timestamp = time.mktime(localtime.timetuple())\n    utc_datetime = datetime.datetime.utcfromtimestamp(timestamp)\n    return utc_datetime\n\n\ndef utctime_to_localtime(utctime: datetime.datetime) -> datetime.datetime:\n    shanghai = pytz.timezone(\"Asia/Shanghai\")\n    return pytz.utc.localize(utctime).astimezone(shanghai)\n\n\ndef pagination_slice(list_, page, page_size):\n    start = (page - 1) * page_size\n    return list_[start: start + page_size]\n\n\ndef drop_duplicates(iterator: Iterable) -> list:\n    return list(set(iterator))\n\n\ndef delete_empty_data(data):\n    new_args = dict()\n    for key, value in data.items():\n        if key not in [\"\", None] and value not in [\"\", None]:\n            new_args[key] = value\n    return ImmutableMultiDict(\n        list(\n            zip(\n                new_args.keys(),\n                new_args.values()\n            )\n        )\n    )\n\n\ndef string_to_list(ids: str):\n    if ids is not None:\n        ids = ids.split(\",\")\n        ids = list(map(lambda id_: int(id_), ids))\n        return ids\n    return None\n\n\ndef expire_time(expire_in):\n    shanghai = pytz.timezone(\"Asia/Shanghai\")\n    cos_policy_expiration = pytz.utc.localize(datetime.datetime.now().replace(\n        microsecond=0) + datetime.timedelta(\n        seconds=expire_in)).astimezone(shanghai).strftime('%Y-%m-%dT%H:%M:%S')\n    return cos_policy_expiration\n", "sub_path": "python/marshmallow/time_utils.py", "file_name": "time_utils.py", "file_ext": "py", "file_size_in_byte": 2127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "marshmallow.fields.Field", "line_number": 11, "usage_type": "attribute"}, {"api_name": "marshmallow.fields", "line_number": 11, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 15, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 16, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 16, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 23, "usage_type": "call"}, {"api_name": "binascii.crc32", "line_number": 24, "usage_type": "call"}, {"api_name": "binascii.crc32", "line_number": 29, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 34, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 39, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 40, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing.Iterable", "line_number": 48, "usage_type": "name"}, {"api_name": "werkzeug.datastructures.ImmutableMultiDict", "line_number": 57, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 76, "usage_type": "call"}, {"api_name": "pytz.utc.localize", "line_number": 77, "usage_type": 
"call"}, {"api_name": "pytz.utc", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "601646579", "text": "#!/usr/bin/env python\n# Tsung-Yi Lin \n# Ramakrishna Vedantam \n\nimport copy\nimport json\nimport math\nimport os\n\nimport numpy as np\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\n\n\nPUNCTUATIONS = [\"''\", \"'\", \"``\", \"`\", \"(\", \")\", \"{\", \"}\", \"[\", \"]\", \\\n \".\", \"?\", \"!\", \",\", \":\", \"-\", \"--\", \"...\", \";\"]\n\n\ndef term_frequency(sentence, ngrams=4):\n \"\"\"Given a sentence, calculates term frequency of tuples.\n\n Parameters\n ----------\n sentence : str\n Sentence whose term frequency has to be calculated.\n ngrams : int\n Number of n-grams for which term frequency is calculated.\n\n Returns\n -------\n dict\n {tuple : int} key-value pairs representing term frequency.\n \"\"\"\n sentence = sentence.lower().strip()\n for punc in PUNCTUATIONS:\n sentence = sentence.replace(punc, \"\")\n words = TreebankWordTokenizer().tokenize(sentence)\n counts = {}\n for i in range(ngrams):\n for j in range(len(words) - i):\n ngram = tuple(words[j:(j + i + 1)])\n if ngram in counts:\n counts[ngram] += 1\n else:\n counts[ngram] = 1\n return counts\n\n\ndef cook_refs(refs, n=4):\n '''Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.\n :param refs: list of string : reference sentences for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (list of dict)\n '''\n return [term_frequency(ref, n) for ref in refs]\n\n\ndef cook_test(test, n=4):\n '''Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.\n :param test: list of string : hypothesis sentence for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (dict)\n '''\n return term_frequency(test, n)\n\n\nclass CiderScorer(object):\n \"\"\"CIDEr scorer.\"\"\"\n\n def copy(self):\n ''' copy the refs.'''\n new = CiderScorer(n=self.n)\n new.ctest = copy.copy(self.ctest)\n new.crefs = copy.copy(self.crefs)\n return new\n\n def __init__(self, test=None, refs=None, n=4, df_mode=\"coco-val-df\"):\n \"\"\"Singular instance.\"\"\"\n self.n = n\n self.df_mode = df_mode\n self.ctest = []\n self.crefs = []\n self.cook_append(test, refs)\n self.ref_len = None\n self.document_frequency = None\n\n def cook_append(self, test, refs):\n \"\"\"Called by constructor and __iadd__ to avoid creating new instances.\"\"\"\n if refs is not None:\n self.crefs.append(cook_refs(refs))\n if test is not None:\n self.ctest.append(cook_test(test)) ## N.B.: -1\n else:\n self.ctest.append(None) # lens of crefs and ctest have to match\n\n def size(self):\n assert len(self.crefs) == len(self.ctest), \"refs/test mismatch! 
%d<>%d" % (len(self.crefs), len(self.ctest))\n        return len(self.crefs)\n\n    def __iadd__(self, other):\n        '''add an instance (e.g., from another sentence).'''\n\n        if isinstance(other, tuple):\n            ## avoid creating new CiderScorer instances\n            self.cook_append(other[0], other[1])\n        else:\n            self.ctest.extend(other.ctest)\n            self.crefs.extend(other.crefs)\n        return self\n\n    def _compute_document_frequency(self):\n        '''\n        Compute document frequency for the reference data.\n        This will be used to compute idf (inverse document frequency) later.\n        Each ngram is counted once per image whose references contain it.\n        :return: document_frequency (dict)\n        '''\n        document_frequency = {}\n        if self.df_mode == \"corpus\":\n            for refs in self.crefs:\n                # refs, k ref captions of one image\n                for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]):\n                    document_frequency[ngram] = document_frequency.get(ngram, 0) + 1\n            assert(len(self.ctest) >= max(document_frequency.values()))\n        elif self.df_mode == \"coco-val-df\":\n            document_frequency_temp = json.load(open(os.path.join('data', 'coco_val_df.json')))\n            # convert string to tuple\n            for key in document_frequency_temp:\n                document_frequency[eval(key)] = document_frequency_temp[key]\n        else:\n            raise ValueError(f\"df_mode can be either corpus or coco-val-df, provided {self.df_mode}!\")\n        return document_frequency\n\n    def compute_score(self):\n        self.document_frequency = self._compute_document_frequency()\n        def counts2vec(cnts):\n            \"\"\"\n            Function maps counts of ngram to vector of tfidf weights.\n            The function returns vec, an array of dictionaries that stores the mapping of n-grams to tf-idf weights.\n            The n-th entry of the array holds the n-grams of length n+1.\n            :param cnts: dict mapping ngram tuples to counts\n            :return: vec (array of dict), norm (array of float), length (int)\n            \"\"\"\n            vec = [{} for _ in range(self.n)]\n            length = 0\n            norm = [0.0 for _ in range(self.n)]\n            for (ngram, term_freq) in cnts.items():\n                # give word count 1 if it doesn't appear in reference corpus\n                df = np.log(self.document_frequency.get(ngram, 1.0))\n                # ngram index\n                n = len(ngram) - 1\n                # tf (term_freq) * idf (precomputed idf) for n-grams\n                vec[n][ngram] = float(term_freq) * (self.ref_len - df)\n                # compute norm for the vector.
 The norm will be used for\n                # computing similarity\n                norm[n] += pow(vec[n][ngram], 2)\n\n                if n == 1:\n                    length += term_freq\n            norm = [np.sqrt(n) for n in norm]\n            return vec, norm, length\n\n        def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):\n            '''\n            Compute the cosine similarity of two vectors.\n            :param vec_hyp: array of dictionary for vector corresponding to hypothesis\n            :param vec_ref: array of dictionary for vector corresponding to reference\n            :param norm_hyp: array of float for vector corresponding to hypothesis\n            :param norm_ref: array of float for vector corresponding to reference\n            :param length_hyp: int containing length of hypothesis\n            :param length_ref: int containing length of reference\n            :return: array of cosine-similarity scores, one per n-gram order\n            '''\n            delta = float(length_hyp - length_ref)\n            # measure cosine similarity\n            val = np.array([0.0 for _ in range(self.n)])\n            for n in range(self.n):\n                # ngram\n                for (ngram,count) in vec_hyp[n].items():\n                    val[n] += vec_hyp[n].get(ngram, 0) * vec_ref[n].get(ngram, 0)\n\n                if (norm_hyp[n] != 0) and (norm_ref[n] != 0):\n                    val[n] /= (norm_hyp[n]*norm_ref[n])\n\n                assert(not math.isnan(val[n]))\n            return val\n\n        # compute log reference length\n        if self.df_mode == \"corpus\":\n            self.ref_len = np.log(float(len(self.crefs)))\n        elif self.df_mode == \"coco-val-df\":\n            # if coco option selected, use length of coco-val set\n            self.ref_len = np.log(float(40504))\n\n        scores = []\n        for test, refs in zip(self.ctest, self.crefs):\n            # compute vector for test captions\n            vec, norm, length = counts2vec(test)\n            # compute vector for ref captions\n            score = np.array([0.0 for _ in range(self.n)])\n            for ref in refs:\n                vec_ref, norm_ref, length_ref = counts2vec(ref)\n                score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)\n            # change by vrama91 - mean of ngram scores, instead of sum\n            score_avg = np.mean(score)\n            # divide by number of references\n            score_avg /= len(refs)\n            # multiply score by 10\n            score_avg *= 10.0\n            # append score of an image to the score list\n            scores.append(score_avg)\n        return np.mean(np.array(scores)), np.array(scores)\n", "sub_path": "pyciderevalcap/cider/cider_scorer.py", "file_name": "cider_scorer.py", "file_ext": "py", "file_size_in_byte": 8304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.tokenize.treebank.TreebankWordTokenizer", "line_number": 36, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 75, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 76, "usage_type": "call"}, {"api_name": "json.load", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}]}
+{"seq_id": "216444699", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_2d_clf_problem(X, y, h=None):\n    '''\n    Plots a two-dimensional labeled dataset (X,y) and, if function h(x) is given, \n    the decision surfaces.\n    '''\n    assert X.shape[1] == 2, \"Dataset is not two-dimensional\"\n    if h is not None:\n        # Create a mesh to plot in\n        r = 0.02 # mesh resolution\n        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n        xx, yy = np.meshgrid(np.arange(x_min, x_max, r),\n                             np.arange(y_min, y_max, r))\n        XX = np.c_[xx.ravel(), yy.ravel()]\n        try:\n            Z_test = h(XX)\n            if Z_test.shape == ():\n                # h returns a scalar when applied to a matrix; map explicitly\n                Z = np.array(list(map(h, XX)))\n            else:\n                Z = Z_test\n        except ValueError:\n            # can't apply to a matrix; map explicitly\n            Z = np.array(list(map(h, XX)))\n        # Put the result into a color plot\n        Z = Z.reshape(xx.shape)\n        plt.contourf(xx, yy, Z, cmap=plt.cm.Pastel1)\n\n    # Plot the dataset\n    plt.scatter(X[:,0],X[:,1], c=y, cmap=plt.cm.Paired, marker='o', s=50)\n", "sub_path": "labosi/lab-2/2015-16/by_unknown/mlutils.py", "file_name": "mlutils.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.meshgrid", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 33, "usage_type": "attribute"}]}
+{"seq_id": "37699011", "text": "import nltk\nfrom nltk.corpus import gutenberg\n\n\ndef unusual_words(text):\n text_vocab = set(w.lower() for w in text if w.isalpha())\n english_vocab = set(w.lower() for w in nltk.corpus.words.words() )\n unusual = text_vocab.difference(english_vocab)\n return sorted(unusual)\n\n\nlist_of_unusual_words = unusual_words(gutenberg.words('austen-sense.txt'))\nprint(list_of_unusual_words)\n\n\n# stop words\n\nfrom nltk.corpus import stopwords\n\n\nprint(stopwords.words('english'))", "sub_path": "lexical-resources-vocabulary.py", "file_name": "lexical-resources-vocabulary.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.corpus.words.words", "line_number": 7, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 7, "usage_type": "attribute"}, {"api_name": "nltk.corpus.gutenberg.words", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.corpus.gutenberg", "line_number": 12, "usage_type": "name"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "174612595", "text": "from __future__ import print_function\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nimport pandas as pd\nimport os\n\nSPREADSHEET_ID = '1VLmc3ztGtdGbx9eByuzozoL7C7KxIqqI5o5sgheA4SQ' # \nSHEET_NAMES = ['Weights', 'C&S', 'Aero', 'Structures', 'FPP', 'Operations', 'Internal']\n\n\nclass GoogleSheetsDataImport(object):\n\n def __init__(self, SPREADSHEET_ID: str = SPREADSHEET_ID, *pages):\n\n self.__sheet_id = SPREADSHEET_ID\n self.__pages = pages\n\n self.__sheets = {}\n self.__import_sheets()\n\n self.__dataframes = {key: self.__sheet_to_dataframe(value) for key, value in self.__sheets.items()}\n\n def get_data(self):\n return self.__dataframes\n\n def __import_sheets(self):\n\n scopes = 'https://www.googleapis.com/auth/spreadsheets.readonly'\n # Setup the Sheets API\n store = file.Storage('/'.join(os.getcwd().split('/')[:-1]) + '/tools/credentials.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('/'.join(os.getcwd().split('/')[:-1]) + '/tools/client_secret.json', scopes)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n\n # Call the Sheets API\n for page in self.__pages:\n gsheet = service.spreadsheets().values().get(spreadsheetId=self.__sheet_id, range=page).execute()\n self.__sheets[page] = gsheet\n\n @staticmethod\n def __sheet_to_dataframe(gsheet):\n\n def istext(item: str):\n for char in item:\n if 65<= ord(char) <= 90 or 97 <= ord(char) <= 122:\n return True\n else:\n continue\n\n try:\n header = gsheet.get('values', [])[0] # Assumes first line is header!\n\n except IndexError:\n return\n\n values = gsheet.get('values', [])[1:] # Everything else is data.\n\n if not values:\n print('No data found.')\n return pd.Series(header)\n\n else:\n all_data = []\n for col_id, col_name in enumerate(header):\n column_data = []\n for row in values:\n #print(col_id)\n item = row[col_id]\n #print(item)\n if '[' in item:\n item = [float(i) for i in item[1:-1].split(',')]\n\n elif col_name == 'Date' or col_name == 'Notes':\n pass\n\n elif not istext(item):\n item = float(item)\n\n else:\n pass\n\n column_data.append(item)\n\n ds = pd.Series(data=column_data, name=col_name)\n all_data.append(ds)\n df = pd.concat(all_data, axis=1)\n return df.iloc[0]\n\n\nif __name__ == '__main__':\n\n G = GoogleSheetsDataImport(SPREADSHEET_ID, 'Weights', 'C&S')\n data = G.get_data()\n", "sub_path": "src/tools/GoogleSheetsImportMac.py", "file_name": "GoogleSheetsImportMac.py", "file_ext": "py", "file_size_in_byte": 3076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "oauth2client.file.Storage", "line_number": 31, "usage_type": "call"}, {"api_name": "oauth2client.file", "line_number": 31, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 31, "usage_type": "call"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 34, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 34, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 34, "usage_type": "call"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 35, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 35, "usage_type": "name"}, {"api_name": "googleapiclient.discovery.build", "line_number": 36, "usage_type": "call"}, {"api_name": "httplib2.Http", "line_number": 36, "usage_type": 
"call"}, {"api_name": "pandas.Series", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 89, "usage_type": "call"}]}
+{"seq_id": "5901086", "text": "\"\"\"\n Proximal Policy Optimization (PPO) Algorithm\n\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.utils import shuffle\n\n\nclass PPO(object):\n def __init__(self, s_dim, a_dim, sess, policy_logvar=-1.0,\n lr_a=0.0001, lr_c=0.001, gamma=0.99,\n epsilon=0.2, batch_size=256,\n c_epochs=10, a_epochs=10, clipping_range=0.2):\n self.sess = sess\n\n self.replay_buffer_x = None\n self.replay_buffer_y = None\n self.c_epochs, self.a_epochs = c_epochs, a_epochs\n\n self.s_dim, self.a_dim = s_dim, a_dim\n self.lr_a, self.lr_c = lr_a, lr_c\n self.gamma = gamma\n self.epsilon = epsilon\n self.policy_logvar = policy_logvar\n self.batch_size = batch_size\n\n self.policy_logvar = policy_logvar\n self.clipping_range = clipping_range\n\n self._placeholders()\n self.v = self._build_value_net(self.s_ph, scope='value_function', trainable=True)\n\n # actor\n self.means, self.log_vars = self._build_policy_net(self.s_ph, 'policy', trainable=True)\n self.logp, self.logp_old = self._logprob()\n\n self.sampled_act = self.means + tf.exp(self.log_vars / 2.0) * tf.random_normal(shape=[self.a_dim,])\n\n self.c_loss = tf.reduce_mean(tf.square(self.v - self.val_ph))\n self.c_train_op = tf.train.AdamOptimizer(self.lr_c).minimize(self.c_loss)\n\n # clipped surrogate objective\n pg_ratio = tf.exp(self.logp - self.logp_old)\n clipped_pg_ratio = tf.clip_by_value(pg_ratio, 1 - self.clipping_range, 1 + self.clipping_range)\n surrogate_loss = tf.minimum(self.adv_ph * pg_ratio, self.adv_ph * clipped_pg_ratio)\n self.a_loss = - tf.reduce_mean(surrogate_loss)\n self.a_train_op = tf.train.AdamOptimizer(self.lr_a).minimize(self.a_loss)\n\n self.sess.run(tf.global_variables_initializer())\n self._print_hyperparams()\n\n print('-- INFO: PPO initialized.')\n print('==========================')\n\n def _print_hyperparams(self):\n print('------------------- Hyperparameters ----------------------')\n print('-- S_Dim:', self.s_dim)\n print('-- A_Dim:', self.a_dim)\n print('-- LR_V:', self.lr_c)\n print('-- LR_Actor:', self.lr_a)\n print('-- Gamma:', self.gamma)\n print('-- Batch_size:', self.batch_size)\n print('--')\n\n def _placeholders(self):\n \"\"\" Input placeholders\"\"\"\n # observations, actions and advantages:\n self.s_ph = tf.placeholder(tf.float32, [None, self.s_dim], 'state')\n self.a_ph = tf.placeholder(tf.float32, [None, self.a_dim], 'action')\n self.adv_ph = tf.placeholder(tf.float32, [None, ], 'advantages')\n self.val_ph = tf.placeholder(tf.float32, [None, 1], 'val_valfunc')\n\n self.old_log_vars_ph = tf.placeholder(tf.float32, [self.a_dim, ], 'old_log_vars')\n self.old_means_ph = tf.placeholder(tf.float32, [None, self.a_dim], 'old_means')\n\n def _build_value_net(self, s, scope, trainable):\n with tf.variable_scope(scope):\n fc1 = tf.layers.dense(s, 200, activation=tf.nn.relu, name='fc1', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / self.s_dim)))\n fc2 = tf.layers.dense(fc1, 100, activation=tf.nn.relu, name='fc2', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 200)))\n v = tf.layers.dense(fc2, 1, activation=None, name='v_value', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 100)))\n return v\n\n def _build_policy_net(self, s, scope, trainable):\n with tf.variable_scope(scope):\n fc1 = tf.layers.dense(s, 200, activation=tf.nn.relu, name='fc1', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 
self.s_dim)))\n fc2 = tf.layers.dense(fc1, 100, activation=tf.nn.relu, name='fc2', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 200)))\n means = tf.layers.dense(fc2, self.a_dim, activation=tf.nn.tanh, name='means', trainable=trainable,\n kernel_initializer=tf.random_normal_initializer(stddev=np.sqrt(1 / 100)))\n logvar_speed = (10 * 64) // 48\n spd_log_vars = tf.get_variable('spd_logvars', [logvar_speed, self.a_dim], tf.float32,\n tf.constant_initializer(0.0))\n log_vars = tf.reduce_sum(spd_log_vars, axis=0) + self.policy_logvar\n\n return means, log_vars\n\n def _logprob(self):\n \"\"\" Calculate log probabilities of a batch of observations & actions\n\n Calculates log probabilities using previous step's model parameters and\n new parameters being trained.\n \"\"\"\n logp = -0.5 * tf.reduce_sum(self.log_vars)\n logp += -0.5 * tf.reduce_sum(tf.square(self.a_ph - self.means) /\n tf.exp(self.log_vars), axis=1)\n\n logp_old = -0.5 * tf.reduce_sum(self.old_log_vars_ph)\n logp_old += -0.5 * tf.reduce_sum(tf.square(self.a_ph - self.old_means_ph) /\n tf.exp(self.old_log_vars_ph), axis=1)\n\n return logp, logp_old\n\n def choose_action(self, s):\n s = s[np.newaxis, :]\n a = self.sess.run(self.sampled_act, feed_dict={self.s_ph: s})[0]\n return np.clip(a, -1, 1)\n\n def predict_v(self, s):\n \"\"\" Predict method \"\"\"\n y_hat = self.sess.run(self.v, feed_dict={self.s_ph: s})\n\n return np.squeeze(y_hat)\n\n def update_p(self, observes, actions, advantages):\n \"\"\" Update policy based on observations, actions and advantages\n\n Args:\n observes: observations, shape = (N, obs_dim)\n actions: actions, shape = (N, act_dim)\n advantages: advantages, shape = (N,)\n \"\"\"\n feed_dict = {self.s_ph: observes,\n self.a_ph: actions,\n self.adv_ph: advantages,\n }\n\n old_means_np, old_log_vars_np = self.sess.run([self.means, self.log_vars], feed_dict)\n feed_dict[self.old_log_vars_ph] = old_log_vars_np\n feed_dict[self.old_means_ph] = old_means_np\n\n a_loss = 0\n for e in range(self.a_epochs):\n # TODO: need to improve data pipeline - re-feeding data every epoch\n self.sess.run(self.a_train_op, feed_dict)\n a_loss = self.sess.run(self.a_loss, feed_dict)\n\n return a_loss\n\n def update_v(self, x, y):\n \"\"\" Fit model to current data batch + previous data batch\n\n Args:\n x: features\n y: target\n logger: logger to save training loss and % explained variance\n \"\"\"\n num_batches = max(x.shape[0] // self.batch_size, 1)\n batch_size = x.shape[0] // num_batches\n\n if self.replay_buffer_x is None:\n x_train, y_train = x, y\n else:\n x_train = np.concatenate([x, self.replay_buffer_x])\n y_train = np.concatenate([y, self.replay_buffer_y])\n self.replay_buffer_x = x\n self.replay_buffer_y = y\n\n for e in range(self.c_epochs):\n x_train, y_train = shuffle(x_train, y_train)\n for j in range(num_batches):\n start = j * batch_size\n end = (j + 1) * batch_size\n feed_dict = {self.s_ph: x_train[start:end, :],\n self.val_ph: y_train[start:end].reshape(-1, 1)}\n _, l = self.sess.run([self.c_train_op, self.c_loss], feed_dict=feed_dict)\n\n y_hat = self.predict_v(x)\n c_loss = np.mean(np.square(y_hat - y)) # explained variance after update\n\n return c_loss\n", "sub_path": "networks/ppo.py", "file_name": "ppo.py", "file_ext": "py", "file_size_in_byte": 7946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.exp", "line_number": 39, "usage_type": "call"}, {"api_name": 
"tensorflow.random_normal", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.exp", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", 
"line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.constant_initializer", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 184, "usage_type": "call"}]}
+{"seq_id": "301660674", "text": "#! /usr/local/bin/python3\n\nimport sys\nfrom skimage import io\nfrom matplotlib import pyplot as plt\n\nFILE_NAME = sys.argv[1]\nTARGET_FILE = \"textrender\"\n\nimg = io.imread(FILE_NAME)\n\nfig = plt.figure()\nplt.imshow(img)\nplt.text(400,400, \"hello\", color=\"white\", fontsize=5)\nfig.savefig(TARGET_FILE+\".pdf\", dpi=600, bbox_inches=\"tight\")\n", "sub_path": "render_text.py", "file_name": "render_text.py", "file_ext": "py", "file_size_in_byte": 331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "skimage.io.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "180772050", "text": "from setuptools import setup\nfrom codecs import open\nfrom os import path\n\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='images2gif',\n version='1.0.0',\n description='Python 3 compatible images2gif.py',\n long_description=long_description,\n url='https://github.com/isaacgerg/images2gif',\n author='Almar Klein, Ant1, Marius van Voorden',\n license='BSD',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 3.5',\n ],\n py_modules=['images2gif'],\n install_requires=['numpy>=1.11.1', 'Pillow>=3.3.1', 'scipy>=0.18.0'],\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "618730489", "text": "from torchvision import datasets, transforms\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom Facenet.load_dataset import load_dataset\r\nfrom Model.MobileFacenet import MobileFaceNet\r\n\r\nimport torch.nn.functional\r\nimport torch\r\nimport pickle\r\nimport math\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef create_svm():\r\n model = MobileFaceNet(512).to(torch.device(\"cuda:0\"))\r\n model.load_state_dict(torch.load('../PretrainedModel/model.pth'))\r\n\r\n dataset = load_dataset('../Dataset/Processed/')\r\n images = []\r\n labels = []\r\n\r\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])\r\n\r\n for class_name in dataset:\r\n for path in class_name.paths:\r\n img = cv2.imread(path)\r\n img = cv2.resize(img,(112,112))\r\n img = transform(img)\r\n img = img.type(torch.FloatTensor)\r\n images.append(img)\r\n labels.append(class_name.name)\r\n\r\n img_batch = torch.utils.data.DataLoader(images, batch_size=32)\r\n labels = np.array(labels)\r\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)\r\n\r\n #---------------------CREATE EMBEDDING AND LABEL-----------------------------------------\r\n labels_encoder = LabelEncoder().fit(labels)\r\n labelsNum = labels_encoder.transform(labels)\r\n nClasses = len(labels_encoder.classes_)\r\n nrof_img = len(labelsNum)\r\n emb = np.zeros((nrof_img,512))\r\n idx = 0\r\n\r\n model.eval()\r\n\r\n for batch in iter(img_batch):\r\n with torch.no_grad():\r\n batch = batch.to(torch.device(\"cuda:0\"))\r\n embedding = model(batch).cpu()\r\n emb[idx:idx+32,:] = embedding\r\n idx += 32\r\n\r\n clf = SVC(C=1, kernel='linear', probability=True)\r\n clf.fit(emb,labelsNum)\r\n\r\n fname = '../PretrainedModel/classifier.pkl'\r\n with open(fname, 'wb') as f:\r\n pickle.dump((labels_encoder, clf), f)\r\n", "sub_path": "create_svm.py", "file_name": "create_svm.py", "file_ext": "py", "file_size_in_byte": 1870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "Model.MobileFacenet.MobileFaceNet", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 17, "usage_type": "call"}, {"api_name": "Facenet.load_dataset.load_dataset", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 39, "usage_type": "call"}, 
{"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 55, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "465295199", "text": "import tensorflow as tf\nfrom nets.inception_v3 import inception_v3, inception_v3_arg_scope\nfrom tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step\nfrom tensorflow.python.platform import tf_logging as logging\nfrom preprocessing import preprocessing_factory\nfrom sklearn.metrics import confusion_matrix\nimport os\nimport time\n\nimport numpy as np\n\nslim = tf.contrib.slim\n\nlog_dir = 'tmp/log/both_with_cp'\nlog_eval = 'tmp/log_eval_test/both_with_cp'\n\n# two dataset directories\ndataset_dir_rgb = 'tmp/dataset_parallel_trf/rgb'\ndataset_dir_depth = 'tmp/dataset_parallel_trf/depth'\n# correspondingly two labels files\nlabels_file_rgb = 'tmp/dataset_parallel_trf/rgb/labels.txt'\nlabels_file_depth = 'tmp/dataset_parallel_trf/depth/labels.txt'\n\nbatch_size = 10\n\nnum_epochs = 1\n\nnum_classes = 5\n\nlabels = open(labels_file_rgb, 'r')\nlabels_to_name = {}\nfor line in labels:\n label, string_name = line.split(':')\n string_name = string_name[:-1]\n labels_to_name[int(label)] = string_name\n\nfile_pattern = 'objects_%s_*.tfrecord'\n\nitems_to_descriptions = {\n 'image': 'A 3-channel RGB coloured product image',\n 'label': 'A label that from 4 labels'\n}\n\ncheckpoint_file = tf.train.latest_checkpoint(log_dir)\n\ndef get_split(split_name, dataset_dir, file_pattern=file_pattern, file_pattern_for_counting='objects'):\n if split_name not in ['train', 'validation']:\n raise ValueError(\n 'The split_name %s is not recognized. Please input either train or validation as the split_name' % (\n split_name))\n\n file_pattern_path = os.path.join(dataset_dir, file_pattern % (split_name))\n\n num_samples = 0\n file_pattern_for_counting = 'objects' + '_' + split_name\n tfrecords_to_count = [os.path.join(dataset_dir, file) for file in os.listdir(dataset_dir) if\n file.startswith(file_pattern_for_counting)]\n for tfrecord_file in tfrecords_to_count:\n for record in tf.python_io.tf_record_iterator(tfrecord_file):\n num_samples += 1\n\n test = num_samples\n\n reader = tf.TFRecordReader\n\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),\n 'image/class/label': tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n }\n\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image(),\n 'label': slim.tfexample_decoder.Tensor('image/class/label'),\n }\n\n decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\n\n labels_to_name_dict = labels_to_name\n\n dataset = slim.dataset.Dataset(\n data_sources=file_pattern_path,\n decoder=decoder,\n reader=reader,\n num_readers=4,\n num_samples=num_samples,\n num_classes=num_classes,\n labels_to_name=labels_to_name_dict,\n items_to_descriptions=items_to_descriptions)\n\n return dataset\n\ndef load_batch(dataset, batch_size, is_training=True):\n '''\n Loads a batch for training.\n\n INPUTS:\n - dataset(Dataset): a Dataset class object that is created from the get_split function\n - batch_size(int): determines how big of a batch to train\n - height(int): the height of the image to resize to during preprocessing\n - width(int): the width of the image to resize to during preprocessing\n - is_training(bool): to determine whether to perform a training or evaluation preprocessing\n\n OUTPUTS:\n - images(Tensor): a Tensor of the shape (batch_size, height, width, channels) that contain one batch of images\n - labels(Tensor): the batch's 
labels with the shape (batch_size,) (requires one_hot_encoding).\n\n '''\n # First create the data_provider object\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n common_queue_capacity=24 + 3 * batch_size,\n common_queue_min=24,\n shuffle=False)\n\n # # Obtain the raw image using the get method\n image, label = data_provider.get(['image', 'label'])\n\n # # Perform the correct preprocessing for this image depending if it is training or evaluating\n image_preprocessing_fn = preprocessing_factory.get_preprocessing('inception_v3',is_training=False)\n\n train_image_size = 256\n image = image_preprocessing_fn(image, train_image_size, train_image_size)\n\n # # Batch up the image by enqueing the tensors internally in a FIFO queue and dequeueing many elements with tf.train.batch.\n images, labels = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=4,\n capacity=5 * batch_size)\n\n return images, labels\n\n\ndef run():\n end_points = {}\n if not os.path.exists(log_eval):\n os.mkdir(log_eval)\n with tf.Graph().as_default() as graph:\n tf.logging.set_verbosity(tf.logging.INFO) # Set the verbosity to INFO level\n \n ########################################################\n # Get RGB dataset and the Imagenet trained on RGB images\n ########################################################\n \n # First create the dataset and load one batch\n dataset_rgb = get_split('validation', dataset_dir_rgb, file_pattern=file_pattern)\n images_rgb, labels_rgb = load_batch(dataset_rgb, batch_size=batch_size)\n\n # Know the number steps to take before decaying the learning rate and batches per epoch\n num_batches_per_epoch = int(dataset_rgb.num_samples / batch_size)\n num_steps_per_epoch = num_batches_per_epoch # Because one step is one batch processed\n\n with tf.variable_scope(\"net_rgb\"):\n # Create the model inference\n with slim.arg_scope(inception_v3_arg_scope()):\n logits_rgb, end_points_rgb = inception_v3(images_rgb, num_classes=dataset_rgb.num_classes, is_training=True)\n\n \n ########################################################\n # Get depth dataset and the Imagenet trained on depth images\n ########################################################\n \n # First create the dataset and load one batch\n dataset_depth = get_split('validation', dataset_dir_depth, file_pattern=file_pattern)\n images_depth, labels_depth = load_batch(dataset_depth, batch_size=batch_size)\n\n # Create the model inference\n with tf.variable_scope(\"net_depth\"):\n with slim.arg_scope(inception_v3_arg_scope()):\n logits_depth, end_points_depth = inception_v3(images_depth, num_classes=dataset_rgb.num_classes, is_training=True)\n\n ########################################################\n # Combine the models with the concatenation operation\n # and add an FC layer on top\n ########################################################\n \n # \n with tf.variable_scope(\"concat_dense\"): \n W_master = tf.Variable(tf.random_uniform([10, 5], -0.01, 0.01), name = \"weights_concat\")\n b_master = tf.Variable(tf.zeros([5]), name = \"bias_concat\")\n \n h_master = tf.matmul(tf.concat((logits_rgb, logits_depth), axis=1), W_master) + b_master\n \n logits2 = tf.layers.dense(inputs=h_master, units=(num_classes * 2), name=\"dense_concat1\")\n \n logits = tf.layers.dense(inputs=logits2, units=num_classes, name=\"dense_concat0\")\n \n end_points['Logits'] = logits\n end_points['Predictions'] = slim.softmax(logits, scope='Predictions')\n \n variables_to_restore = slim.get_variables_to_restore()\n 
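# Reload the fused graph's weights from the latest training checkpoint; restore_fn below is handed to the Supervisor as init_fn.\n        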
saver = tf.train.Saver(variables_to_restore)\n\n        def restore_fn(sess):\n            return saver.restore(sess, checkpoint_file)\n        \n        ####################################################\n        # EVALUATION\n        ####################################################\n\n        predictions = tf.argmax(end_points['Predictions'], 1)\n        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels_rgb)\n        metrics_op = tf.group(accuracy_update)\n\n        global_step = get_or_create_global_step()\n        global_step_op = tf.assign(global_step, global_step + 1)\n        \n        conf_m = np.zeros((5, 5))\n\n        def eval_step(sess, metrics_op, global_step, confusion_m):\n            '''\n            Runs the metrics op, accumulates the confusion matrix and logs timing for one batch.\n            '''\n            \n            start_time = time.time()\n            _, global_step_count, accuracy_value, labels, prediction = sess.run([metrics_op, global_step_op, accuracy, labels_rgb, predictions])\n            time_elapsed = time.time() - start_time\n            \n            # labels and prediction come from the same run as the metrics update, so the confusion matrix reflects the batch that was just scored\n            \n            confusion_m += confusion_matrix(labels, prediction, labels = [0, 1, 2, 3, 4])\n\n            logging.info('Global Step %s: Streaming Accuracy: %.4f (%.2f sec/step)', global_step_count, accuracy_value,\n                         time_elapsed)\n\n            return accuracy_value\n\n        tf.summary.scalar('Validation_Accuracy', accuracy)\n        my_summary_op = tf.summary.merge_all()\n\n        sv = tf.train.Supervisor(logdir=log_eval, summary_op=None, saver=None, init_fn=restore_fn)\n\n        with sv.managed_session() as sess:\n            num_steps_per_epoch = int(num_steps_per_epoch)\n            for step in range(num_steps_per_epoch * num_epochs):\n                sess.run(sv.global_step)\n                if step % num_batches_per_epoch == 0:\n                    logging.info('Epoch: %s/%s', step / num_batches_per_epoch + 1, num_epochs)\n                    logging.info('Current Streaming Accuracy: %.4f', sess.run(accuracy))\n\n                if step % 10 == 0:\n                    eval_step(sess, metrics_op=metrics_op, global_step=sv.global_step, confusion_m = conf_m)\n                    summaries = sess.run(my_summary_op)\n                    sv.summary_computed(sess, summaries)\n\n\n                else:\n                    eval_step(sess, metrics_op=metrics_op, global_step=sv.global_step,confusion_m = conf_m)\n\n            logging.info('Final Streaming Accuracy: %.4f', sess.run(accuracy))\n            \n            images_rgb, images_depth, labels, predictions = sess.run([images_rgb, images_depth, labels_rgb, predictions])\n            \n            print (sess.run(end_points['Predictions']))\n            \n            print (conf_m)\n            \n            \n            for i in range(10):\n                label, prediction = labels[i], predictions[i]\n                prediction_name, label_name = dataset_rgb.labels_to_name[prediction], dataset_rgb.labels_to_name[label]\n                text = 'Prediction: %s \\n Ground Truth: %s' % (prediction_name, label_name)\n                print(text)\n            logging.info(\n
Visit TensorBoard for more information regarding your evaluation.')\n \nif __name__ == '__main__':\n run()", "sub_path": "eval_both.py", "file_name": "eval_both.py", "file_ext": "py", "file_size_in_byte": 11069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.contrib", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.python_io.tf_record_iterator", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.python_io", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.TFRecordReader", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.FixedLenFeature", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "preprocessing.preprocessing_factory.get_preprocessing", "line_number": 121, "usage_type": "call"}, {"api_name": "preprocessing.preprocessing_factory", "line_number": 121, "usage_type": "name"}, {"api_name": "tensorflow.train.batch", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.logging.set_verbosity", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 155, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3_arg_scope", "line_number": 157, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 170, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3_arg_scope", "line_number": 171, "usage_type": "call"}, {"api_name": "nets.inception_v3.inception_v3", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 181, "usage_type": "call"}, {"api_name": "tensorflow.random_uniform", "line_number": 181, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 182, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 182, "usage_type": 
"call"}, {"api_name": "tensorflow.matmul", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 184, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 186, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 194, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.contrib.metrics.streaming_accuracy", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tensorflow.group", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 210, "usage_type": "call"}, {"api_name": "time.time", "line_number": 217, "usage_type": "call"}, {"api_name": "time.time", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 223, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 225, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 225, "usage_type": "name"}, {"api_name": "tensorflow.summary.scalar", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 230, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 231, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Supervisor", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 233, "usage_type": "attribute"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 240, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 240, "usage_type": "name"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 241, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 241, "usage_type": "name"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 252, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 252, "usage_type": "name"}, {"api_name": "tensorflow.python.platform.tf_logging.info", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.python.platform.tf_logging", "line_number": 266, "usage_type": "name"}]}
+{"seq_id": "438107253", "text": "# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\n\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_desktopvirtualization.generated._client_factory import cf_workspace\n desktopvirtualization_workspace = CliCommandType(\n operations_tmpl='azext_desktopvirtualization.vendored_sdks.desktopvirtualization.operations._workspace_operatio'\n 'ns#WorkspaceOperations.{}',\n client_factory=cf_workspace)\n with self.command_group('desktopvirtualization workspace', desktopvirtualization_workspace,\n client_factory=cf_workspace, is_experimental=True) as g:\n g.custom_command('list', 'desktopvirtualization_workspace_list')\n g.custom_show_command('show', 'desktopvirtualization_workspace_show')\n g.custom_command('create', 'desktopvirtualization_workspace_create')\n g.custom_command('update', 'desktopvirtualization_workspace_update')\n g.custom_command('delete', 'desktopvirtualization_workspace_delete')\n\n from azext_desktopvirtualization.generated._client_factory import cf_application_group\n desktopvirtualization_application_group = CliCommandType(\n operations_tmpl='azext_desktopvirtualization.vendored_sdks.desktopvirtualization.operations._application_group_'\n 'operations#ApplicationGroupOperations.{}',\n client_factory=cf_application_group)\n with self.command_group('desktopvirtualization applicationgroup', desktopvirtualization_application_group,\n client_factory=cf_application_group, is_experimental=True) as g:\n g.custom_command('list', 'desktopvirtualization_applicationgroup_list')\n g.custom_show_command('show', 'desktopvirtualization_applicationgroup_show')\n g.custom_command('create', 'desktopvirtualization_applicationgroup_create')\n g.custom_command('update', 'desktopvirtualization_applicationgroup_update')\n g.custom_command('delete', 'desktopvirtualization_applicationgroup_delete')\n\n from azext_desktopvirtualization.generated._client_factory import cf_host_pool\n desktopvirtualization_host_pool = CliCommandType(\n operations_tmpl='azext_desktopvirtualization.vendored_sdks.desktopvirtualization.operations._host_pool_operatio'\n 'ns#HostPoolOperations.{}',\n client_factory=cf_host_pool)\n with self.command_group('desktopvirtualization hostpool', desktopvirtualization_host_pool,\n client_factory=cf_host_pool, is_experimental=True) as g:\n g.custom_command('list', 'desktopvirtualization_hostpool_list')\n g.custom_show_command('show', 'desktopvirtualization_hostpool_show')\n g.custom_command('create', 'desktopvirtualization_hostpool_create')\n g.custom_command('update', 'desktopvirtualization_hostpool_update')\n g.custom_command('delete', 'desktopvirtualization_hostpool_delete')\n", "sub_path": "src/desktopvirtualization/azext_desktopvirtualization/generated/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 3395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "azure.cli.core.commands.CliCommandType", "line_number": 
19, "usage_type": "call"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_workspace", "line_number": 22, "usage_type": "name"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_workspace", "line_number": 24, "usage_type": "name"}, {"api_name": "azure.cli.core.commands.CliCommandType", "line_number": 32, "usage_type": "call"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_application_group", "line_number": 35, "usage_type": "name"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_application_group", "line_number": 37, "usage_type": "name"}, {"api_name": "azure.cli.core.commands.CliCommandType", "line_number": 45, "usage_type": "call"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_host_pool", "line_number": 48, "usage_type": "name"}, {"api_name": "azext_desktopvirtualization.generated._client_factory.cf_host_pool", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "547131191", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport json\nimport time\nimport requests\nimport random\nfrom dotenv import load_dotenv\nfrom loguru import logger\nfrom slackclient import SlackClient\n\nload_dotenv()\n\nSLCKBTD = None\nSLCKCLNT = SlackClient(os.getenv(\"SLACK_BOT_TOKEN\"))\n\n#[ Magic Declarations ]#\nRTM_READ_DELAY = 1\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\n#[ End magic declarations ]#\n\n#------ Define bot functions here ------------------------------------------\ndef say_hello(command, event):\n return f\"Hello {event['user']} :snake:\"\n \ndef get_kitty(command, event):\n r = requests.get(f\"https://community-placekitten.p.rapidapi.com/{random.randint(100, 600)}/{random.randint(100, 600)}\", headers={\"X-RapidAPI-Key\": os.getenv(\"X-RapidAPI-Key\")})\n open('kitty.png', 'wb').write(r.content)\n SLCKCLNT.api_call(\n 'files.upload', \n channels=event['channel'], \n as_user=True, \n filename='pic.jpg', \n file=open('kitty.png', 'rb'),\n )\n return False\n\ndef get_norris(command, event):\n r = requests.get(\"https://matchilling-chuck-norris-jokes-v1.p.rapidapi.com/jokes/random\", headers={\"X-RapidAPI-Key\": os.getenv(\"X-RapidAPI-Key\")})\n j = json.loads(r.text)\n return j[\"value\"] + \" :tada:\"\n\ndef get_insult(command, event):\n r = requests.get(f\"https://insult.mattbas.org/api/insult.txt?who={event['user']}\")\n return r.text\n#------ Add definitions to CMDS dict ---------------------------------------\n\nCMMDS = {\n # Key: func\n \"hi\": say_hello,\n \"kitten\": get_kitty,\n \"norris\": get_norris,\n \"insult\": get_insult\n}\n\ndef humanizeChannel(channel):\n return \"#{}\".format(\n SLCKCLNT.api_call(\n \"channels.info\", \n channel=channel\n ).get(\n 'channel', \n {}\n ).get('name'))\n\n\ndef humanizeUser(user):\n return SLCKCLNT.api_call(\n \"users.info\", \n user=user).get(\n 'user', \n {}\n ).get('name')\n\n\ndef parse_incoming(sevent):\n for event in sevent:\n if 'user' in event:\n event['user'] = humanizeUser(event['user'])\n\n if 'channel' in event:\n event['channel'] = humanizeChannel(event['channel'])\n\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n\n user_id, message = matchDirect(event[\"text\"])\n if user_id == SLCKBTD:\n logger.info(f\"Message Recieved in {event['channel']}: {message}\")\n return message, event\n # if 'subtype' in event and event['subtype'] != \"bot_message\":\n logger.debug(event)\n return None, None\n\ndef matchDirect(msg):\n r = re.search(MENTION_REGEX, msg)\n return (r.group(1), r.group(2).strip()) if r else (None, None)\n\ndef handle_command(command, event):\n comm = command.split(\" \")\n response = None\n\n if comm[0] in CMMDS:\n response = CMMDS[comm[0]](command, event)\n \n if response != False and response != None:\n logger_response = response.replace('\\n', ' ')[:20]\n logger.info(f\"Response: {logger_response}...\") \n SLCKCLNT.api_call(\n \"chat.postMessage\",\n channel=event['channel'],\n text=response or \"What was that? :: Try: \" + \", \".join([x for x in CMMDS.keys()])\n )\n\n if response == None:\n SLCKCLNT.api_call(\n \"chat.postMessage\",\n channel=event['channel'],\n text=\"What was that? 
:: Try: \" + \", \".join([x for x in CMMDS.keys()])\n )\n\nif __name__ == \"__main__\":\n if SLCKCLNT.rtm_connect(with_team_state=False):\n SLCKBTD = SLCKCLNT.api_call(\"auth.test\")[\"user_id\"]\n logger.info(f\"Bot connected {SLCKBTD}\")\n\n while True:\n command, channel = parse_incoming(SLCKCLNT.rtm_read())\n if command:\n handle_command(command, channel)\n time.sleep(RTM_READ_DELAY)\n else:\n logger.exception(\"Connection Failed\")\n \n", "sub_path": "jbot.py", "file_name": "jbot.py", "file_ext": "py", "file_size_in_byte": 3973, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 15, "usage_type": "call"}, {"api_name": "slackclient.SlackClient", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 91, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 91, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 94, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 94, "usage_type": "name"}, {"api_name": "re.search", "line_number": 98, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 110, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 110, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 127, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 127, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}, {"api_name": "loguru.logger.exception", "line_number": 135, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 135, "usage_type": "name"}]}
+{"seq_id": "221117320", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#!/usr/bin/python2.5\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom google.appengine.ext import db\nfrom google.appengine.datastore import entity_pb\n\n\ndef to_binary(data):\n \"\"\" compresses entities or lists of entities for caching.\n \n Args: \n data - arbitrary data input, on its way to memcache\n \"\"\"\n if isinstance(data, db.Model):\n # Just one instance\n return makeProtoBufObj(data)\n # if none of the first 5 items are models, don't look for entities\n elif isinstance(data, list) and any(isinstance(x, db.Model) for x in data):\n # list of entities\n entities = []\n for obj in data:\n # if item is entity, convert it.\n if isinstance(obj, db.Model):\n protobuf_obj = makeProtoBufObj(obj)\n entities.append(protobuf_obj)\n else:\n entities.append(obj)\n buffered_list = ProtoBufList(entities)\n return buffered_list\n else: # return data as is \n return data\n\n\ndef from_binary(data):\n \"\"\" decompresses entities or lists from cache.\n \n Args: \n data - arbitrary data input from memcache\n \"\"\"\n if isinstance(data, ProtoBufObj):\n # Just one instance\n return db.model_from_protobuf(entity_pb.EntityProto(data.val))\n elif isinstance(data, ProtoBufList):\n entities = []\n for obj in data.vals:\n # if item is entity, convert it.\n if isinstance(obj, ProtoBufObj):\n model_class = obj.model_class\n entities.append(db.model_from_protobuf(entity_pb.EntityProto(obj.val)))\n else:\n entities.append(obj)\n return entities\n else: # return data as is\n return data\n\nclass ProtoBufObj():\n \"\"\" special type used to identify protobuf objects \"\"\"\n def __init__(self, val, model_class): \n self.val = val\n self.model_class = model_class\n # model class makes it unnecessary to import model classes\n \nclass ProtoBufList():\n \"\"\" special type used to identify list containing protobuf objects \"\"\"\n def __init__(self, vals):\n self.vals = vals\n\ndef makeProtoBufObj(obj):\n val = db.model_to_protobuf(obj).Encode()\n model_class = db.class_for_kind(obj.kind())\n return ProtoBufObj(val, model_class) \n", "sub_path": "util/cache_compress.py", "file_name": "cache_compress.py", "file_ext": "py", "file_size_in_byte": 2863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "google.appengine.ext.db.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 26, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Model", "line_number": 30, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 30, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.Model", "line_number": 35, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 35, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.model_from_protobuf", "line_number": 54, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 
54, "usage_type": "name"}, {"api_name": "google.appengine.datastore.entity_pb.EntityProto", "line_number": 54, "usage_type": "call"}, {"api_name": "google.appengine.datastore.entity_pb", "line_number": 54, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.model_from_protobuf", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 61, "usage_type": "name"}, {"api_name": "google.appengine.datastore.entity_pb.EntityProto", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.datastore.entity_pb", "line_number": 61, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.model_to_protobuf", "line_number": 81, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 81, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.class_for_kind", "line_number": 82, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 82, "usage_type": "name"}]}
+{"seq_id": "596670846", "text": "import mongodb\nimport config\nimport telebot\nfrom telebot import types\n\n\nbot = telebot.TeleBot(config.token)\n\n\n@bot.message_handler(commands=['feedback'])\ndef send_contacts(message):\n bot.send_message(message.chat.id, 'Будем оставаться на связи!\\n')\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_phone = types.KeyboardButton(text=\"Отправить номер телефона\", request_contact=True)\n keyboard.add(button_phone)\n bot.send_message(message.chat.id,\n 'Добро пожаловать в BotsApp \\U0001F60A \\n'\n 'Для авторизации нажмите кнопку «Отправить мой номер».',\n reply_markup=keyboard)\n\n\n@bot.message_handler(content_types='contact')\n@bot.message_handler(func=lambda message: message.text == 'Основное меню', content_types=['text'])\ndef main_menu(message):\n mongodb.MongoAdd().inserting(message.contact)\n keyboard = types.ReplyKeyboardMarkup()\n big_button = types.KeyboardButton(text='\\U0001F5B2 Заказать бота')\n button_1a = types.KeyboardButton(text='\\U0001F532 Услуги')\n button_1b = types.KeyboardButton(text='\\U0001F47E Роботы BotsApp')\n button_2a = types.KeyboardButton(text='\\U00002754 F.A.Q.',)\n button_2b = types.KeyboardButton(text='\\U0001F4F2 Контакты')\n keyboard.row(big_button)\n keyboard.row(button_1a, button_1b)\n keyboard.row(button_2a, button_2b)\n bot.send_message(message.chat.id,\n '\\U00002754 Узнайте что такое чат-боты Telegram\\n '\n '\\U0001F441 смотрите примеры наших роботов\\n '\n '\\U0001F916 заказывайте бота\\n '\n '\\U0001F4F2 мы всегда готовы ответить на любые ваши вопросы',\n reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda message: message.text == '❔ F.A.Q.', content_types=['text'])\n@bot.message_handler(func=lambda message: message.text == '🔙 🤖 Назад', content_types=['text'])\ndef send_faq(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_1 = types.KeyboardButton(text='Что умеют?')\n button_2 = types.KeyboardButton(text='Как заказать?')\n button_3 = types.KeyboardButton(text='Стоимость')\n button_4 = types.KeyboardButton(text='Поддержка')\n button_5 = types.KeyboardButton(text='Основное меню')\n keyboard.add(button_1, button_2, button_3, button_4, button_5)\n bot.send_message(message.chat.id,\n ' Что такое бот?\\n'\n '\\U0001F916 Telegram-бот – это:\\n'\n '● диалоговый агент, который имитирует осмысленную беседу без участия человека;\\n'\n '● помогает компаниям предоставить для пользователей персонализированный сервис '\n 'и увеличить лояльность к бренду;\\n'\n '● полноценный сотрудник любой организации, который работает круглосуточно и '\n 'которому не нужно платить.',\n reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Что умеют?', content_types=['text'])\ndef what_can_do(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_1 = types.KeyboardButton(text='\\U0001F519 \\U0001F916 Назад')\n button_2 = types.KeyboardButton(text='Основное меню')\n keyboard.add(button_1, button_2)\n bot.send_message(message.chat.id,\n 'Что умеют наши чат-боты?\\n'\n '● Предлагают пользователю любую форму(например,бронирование столика) и обрабатывают её\\n'\n '● Выводят каталоги в удобном виде:\\n'\n '\\U000027A1список услуг компании,\\n'\n '\\U000027A1часто задаваемые вопросы,\\n'\n '\\U000027A1каталог мобильных устройств и прочее\\n'\n '● Заменяют колл-центр вашей компании\\n'\n '● Оформляют рассылку 
(поскольку клиент сам добавляет бота в контакты личной переписки, то ни одно' \n 'ваше сообщение не останется без внимания)\\n'\n '● и многое другое.\\n\\n'\n 'У такого бота будет несколько основных козырей:\\n'\n '\\U00002705регулярная поддержка внимания клиента к продукту/услуге\\n'\n '\\U00002705автоматические экспресс-консультации и продажи прямо в личном чате Telegram\\n'\n '\\U00002705разработка бота выйдет дешевле, чем создание своего приложения, а эффективность от него'\n ' в нынешнее время больше',\n reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda message: message.text == 'Стоимость', content_types=['text'])\ndef how_order(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_1 = types.KeyboardButton(text='\\U0001F519 \\U0001F916 Назад')\n button_2 = types.KeyboardButton(text='Основное меню')\n keyboard.add(button_1, button_2)\n bot.send_message(message.chat.id,\n 'Цена формируется в индивидуальном порядке исходя из требований и пожеланий наших заказчиков.'\n 'Создание чат-бота это работа не по шаблону и вкаждом конкретном случае всё зависит от\\n'\n '● целей и задач, которые должен решать бот,\\n'\n '● его функционала,\\n'\n '● наличия или отсутствия админ-панели,\\n'\n '● интеграции с внешним АPI,\\n'\n '● подключением CRM и/или иных систем,\\n'\n '● возможностью внутренней обработки и прогнозировки данных,\\n'\n '● подключением аналитических функций и прочего.\\n\\n'\n 'Иными словами,чем проще или сложнее вам потребуется бот, тем дешевле или дороже выйдет его стоимость. '\n 'Начальная стоимость, от 200$ долларов.\\n\\n'\n 'На всех этапах работы мы всегда готовы предоставить вам исчерпывающие консультации и помощь в составлении ТЗ,'\n ' а также предоставим лучшие рекомендации по внедрению бота в вашу работу \\U0001F60A',\n reply_markup=keyboard)\n\n\nif __name__ == '__main__':\n bot.polling(none_stop=True, interval=True)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "telebot.TeleBot", "line_number": 7, "usage_type": "call"}, {"api_name": "config.token", "line_number": 7, "usage_type": "attribute"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 17, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 17, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 18, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 18, "usage_type": "name"}, {"api_name": "mongodb.MongoAdd", "line_number": 29, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 30, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 30, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 31, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 31, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 32, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 32, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 33, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 33, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 34, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 34, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 35, "usage_type": "call"}, 
{"api_name": "telebot.types", "line_number": 35, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 50, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 50, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 51, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 51, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 52, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 52, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 53, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 53, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 54, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 54, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 55, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 55, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 70, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 70, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 71, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 71, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 72, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 72, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 95, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 95, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 96, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 96, "usage_type": "name"}, {"api_name": "telebot.types.KeyboardButton", "line_number": 97, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 97, "usage_type": "name"}]}
+{"seq_id": "98235830", "text": "import RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nfrom serial.utcp import UTCP\n\nser = serial.Serial(port=\"/dev/serial0\", baudrate=9600) # Open port with baud rate\nsender = UTCP(ser)\n \nSDA = 3\nSLC = 5\nTEC_Perisoltic = 7\nFan = 11\nPH_on = 13\nEC_on = 15\nDin = 19 #Pi4 output\nDout = 21 #Pi4 input\nSCLK = 23\nStep_CTRL = 29\nEC_Blue = 31\nEC_Red = 33\nRC_Grn = 35\nEC_Black = 37\n\nTX = 8\nRX = 10 # input \nheater = 12\nFloat = 16 #input\nwater_pump = 18\nAir_pump = 22\nADC_CS = 24 #put high\nPH_Blue = 32\nPH_Red = 36\nPH_Grn = 38\nPH_Black = 40 \nglobal control_pins\ncontrol_pins = [TEC_Perisoltic, Fan, PH_on, EC_on, Step_CTRL, heater, water_pump, Air_pump]\n\ndef off():\n global control_pins\n for pin in control_pins:\n GPIO.setup(pin, GPIO.OUT)\n \n for pin in control_pins:\n GPIO.output(pin, GPIO.LOW) \n\n for x in range(5): #turn off LEDs & solinoids\n sender.send(x, 4, 0) #solinoids\n sender.send(x, 5, 0) #red \n sender.send(x, 6, 0) #blue \n", "sub_path": "PI4/AtomgreensUI/Stop.py", "file_name": "Stop.py", "file_ext": "py", "file_size_in_byte": 1002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 2, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 2, "usage_type": "name"}, {"api_name": "RPi.GPIO.BOARD", "line_number": 2, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 3, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 3, "usage_type": "name"}, {"api_name": "serial.utcp.Serial", "line_number": 6, "usage_type": "call"}, {"api_name": "serial.utcp", "line_number": 6, "usage_type": "name"}, {"api_name": "serial.utcp.UTCP", "line_number": 7, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 41, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 41, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 44, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 44, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 44, "usage_type": "attribute"}]}
+{"seq_id": "243937690", "text": "from django import forms\nfrom django.core.validators import RegexValidator\n\nfrom .models import Account\nfrom django.contrib.auth.forms import UserCreationForm\nfrom projects.models import *\n\n\nclass UserRegisterationForm(UserCreationForm):\n email = forms.EmailField(required=True)\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n profile_picture = forms.ImageField(required=False)\n phone_regex = RegexValidator(regex=r'^[\\+02]?(01)(0|1|2|5)([0-9]{8})$',\n message=\"Sorry, Egyptian Phone numbers are only allowed.\")\n mobile = forms.CharField(validators=[phone_regex], max_length=14, required=False) # validators should be a list\n\n # facebook = forms.CharField(required=False, max_length=200)\n # public_info = forms.CharField(required=False, widget=forms.Textarea)\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'email', 'username', 'password1', 'password2', 'mobile', 'profile_picture']\n labels = {\n 'email': 'Email',\n 'first_name': 'First Name',\n 'last_name': 'Last Name',\n 'profile_picture': 'Profile Picture',\n }\n\n\nclass AccountUpdateForm(forms.ModelForm):\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'username', 'profile_picture', 'mobile', 'birthdate', 'country', 'facebook_profile']\n readonly_fields = ['email']\n\n def clean_email(self):\n if self.is_valid():\n email = self.cleaned_data['email']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(email=email)\n except Account.DoesNotExist:\n return email\n raise forms.ValidationError('email \"%s\" is already in use' % account.email)\n\n def clean_username(self):\n if self.is_valid():\n username = self.cleaned_data['username']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(username=username)\n except Account.DoesNotExist:\n return username\n raise forms.ValidationError('username \"%s\" is already in use' % account.username)\n \n def clean_firstname(self):\n if self.is_valid():\n first_name = self.cleaned_data['first_name']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(first_name=first_name)\n except Account.DoesNotExist:\n return first_name\n raise forms.ValidationError('first name \"%s\" is already in use' % account.first_name)\n\n def clean_lastname(self):\n if self.is_valid():\n last_name = self.cleaned_data['last_name']\n try:\n account = Account.objects.exclude(pk=self.instance.pk).get(last_name=last_name)\n except Account.DoesNotExist:\n return last_name\n raise forms.ValidationError('last name \"%s\" is already in use' % account.last_name)\n\n", "sub_path": "authenticate/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.ImageField", "line_number": 13, "usage_type": "call"}, {"api_name": 
"django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Account", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Account", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 42, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 43, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 45, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 52, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 54, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 54, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 60, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 61, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 63, "usage_type": "name"}, {"api_name": "models.Account.objects.exclude", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 69, "usage_type": "name"}, {"api_name": "models.Account.DoesNotExist", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 72, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 72, "usage_type": "name"}]}
+{"seq_id": "478557542", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('report_builder', '0004_auto_20160906_1149'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='report',\n name='chart_type',\n field=models.IntegerField(null=True, blank=True),\n ),\n ]\n", "sub_path": "report_builder/migrations/0005_report_chart_type.py", "file_name": "0005_report_chart_type.py", "file_ext": "py", "file_size_in_byte": 422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "340434745", "text": "import ConfigSpace\nimport numpy as np\nimport numpy.random\n\ndef make_config_compatible(config, config_space):\n if isinstance(config, dict):\n config = config\n else:\n config = config.get_dictionary()\n\n # remove illegal values\n config = {k: v for k, v in config.items() \n if k in config_space.get_hyperparameter_names()\n and config_space.get_hyperparameter(k).is_legal(v)}\n\n # add values missing for current config space: random value\n for hp in config_space.get_hyperparameters():\n if hp.name not in config and isinstance(hp, ConfigSpace.hyperparameters.Constant):\n config[hp.name] = hp.value\n elif hp.name not in config:\n config[hp.name] = hp.sample(config_space.random)\n\n # delete values for inactive hyperparameters\n config = ConfigSpace.util.deactivate_inactive_hyperparameters(\n\t\t\t\t\t\t\t\t\tconfiguration_space=config_space,\n\t\t\t\t\t\t\t\t\tconfiguration=config)\n return ConfigSpace.Configuration(config_space, config)\n\ndef make_bw_compatible(bw, from_configspace, to_configspace):\n bw = insert_constant(bw, from_configspace)\n result = np.zeros(len(to_configspace.get_hyperparameter_names()))\n for i in range(len(bw)):\n j = transform_hyperparameter_index(i, from_configspace, to_configspace)\n if j is not None:\n result[j] = bw[i]\n return filter_constant(result, to_configspace)\n\ndef make_vector_compatible(vector, from_configspace, to_configspace, imputer):\n vector = np.asanyarray(vector)\n vector = insert_constant(vector, from_configspace)\n x = np.array(vector).reshape((-1, len(from_configspace.get_hyperparameters())))\n c = np.zeros((x.shape[0], len(to_configspace.get_hyperparameters()))) * np.nan\n\n # copy given values at correct index\n for i in range(x.shape[1]):\n j = transform_hyperparameter_index(i, from_configspace, to_configspace)\n if j is not None:\n c[:, j] = transform_hyperparameter(from_configspace, to_configspace, i, j, x[:, i])\n return imputer(filter_constant(c, to_configspace))\n\ndef transform_hyperparameter_index(idx, from_configspace, to_configspace):\n hp_name = from_configspace.get_hyperparameter_by_idx(idx)\n try:\n return to_configspace.get_idx_by_hyperparameter_name(hp_name)\n except:\n return None\n\ndef transform_hyperparameter(from_configspace, to_configspace, from_idx, to_idx, vector):\n from_hp = from_configspace.get_hyperparameter(from_configspace.get_hyperparameter_by_idx(from_idx))\n to_hp = to_configspace .get_hyperparameter(to_configspace .get_hyperparameter_by_idx(to_idx))\n result = np.ones(vector.shape) * np.nan\n for i, v in enumerate(vector):\n try:\n transformed = from_hp._transform(v)\n except:\n print(\"\\nvalue:\", v)\n print(\"hp:\", from_hp)\n print(\"to hp:\", to_hp)\n raise\n transformed = transformed[0] if isinstance(transformed, np.ndarray) else transformed\n if to_hp.is_legal(transformed):\n result[i] = to_hp._inverse_transform(transformed)\n return result\n\n\ndef num_non_constant_hps(cs):\n return np.sum(~constant_hypers(cs))\n\n\ndef filter_constant(array, cs):\n if len(array.shape) == 1:\n return array[~constant_hypers(cs)]\n else:\n return array[:, ~constant_hypers(cs)]\n\n\ndef constant_hypers(cs):\n constant_idxs = []\n for hyper in cs.get_hyperparameters():\n idx = cs.get_idx_by_hyperparameter_name(hyper.name)\n if is_constant(hyper):\n constant_idxs.append(idx)\n return np.array([i in constant_idxs for i in range(len(cs.get_hyperparameters()))])\n\n\ndef is_constant(hyper):\n if isinstance(hyper, ConfigSpace.hyperparameters.Constant):\n return True\n\n elif 
isinstance(hyper, ConfigSpace.hyperparameters.CategoricalHyperparameter):\n if len(hyper.choices) == 1:\n return True\n\n return False\n\n\ndef insert_constant(array, cs):\n if len(array.shape) == 1:\n result = np.zeros(len(cs.get_hyperparameters()))\n else:\n result = np.zeros((array.shape[0], len(cs.get_hyperparameters())))\n\n non_constant_pointer = 0\n for i, constant in enumerate(constant_hypers(cs)):\n if not constant and len(array.shape) == 1:\n result[i] = array[non_constant_pointer]\n non_constant_pointer += 1\n elif not constant:\n result[:, i] = array[:, non_constant_pointer]\n non_constant_pointer += 1\n return result\n", "sub_path": "hpbandster/metalearning/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 4490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ConfigSpace.hyperparameters", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ConfigSpace.util.deactivate_inactive_hyperparameters", "line_number": 24, "usage_type": "call"}, {"api_name": "ConfigSpace.util", "line_number": 24, "usage_type": "attribute"}, {"api_name": "ConfigSpace.Configuration", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "ConfigSpace.hyperparameters", "line_number": 97, "usage_type": "attribute"}, {"api_name": "ConfigSpace.hyperparameters", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}]}
+{"seq_id": "475562155", "text": "from django.core.management.base import BaseCommand\n\nimport log\n\nfrom elections.models import District, DistrictCategory, Party\n\n\nclass Command(BaseCommand):\n help = \"Migrate data between existing models and initialize constants\"\n\n def handle(self, verbosity: int, **_kwargs):\n log.init(reset=True, verbosity=verbosity)\n\n self.initialize_parties()\n self.initialize_districts()\n\n def initialize_parties(self):\n for name, color in [\n # Placeholders\n (\"Nonpartisan\", '#999'),\n (\"No Party Affiliation\", '#999'),\n # Parties\n (\"Democratic\", '#3333FF'),\n (\"Green\", '#00A95C'),\n (\"Libertarian\", '#ECC850'),\n (\"Natural Law\", '#FFF7D6'),\n (\"Republican\", '#E81B23'),\n (\"U.S. Taxpayers\", '#A356DE'),\n (\"Working Class\", '#A30000'),\n ]:\n party, created = Party.objects.update_or_create(\n name=name, defaults=dict(color=color)\n )\n if created:\n self.stdout.write(f'Added party: {party}')\n\n def initialize_districts(self):\n state, created = DistrictCategory.objects.get_or_create(name=\"State\")\n if created:\n self.stdout.write(f'Added district category: {state}')\n\n for name in [\n # State\n \"County\",\n \"Jurisdiction\",\n \"Precinct\",\n # Local\n \"City\",\n \"District Library\",\n \"Local School\",\n \"Intermediate School\",\n \"Township\",\n \"Metropolitan\",\n \"Village\",\n \"Authority\",\n \"Library\",\n ]:\n category, created = DistrictCategory.objects.get_or_create(name=name)\n if created:\n self.stdout.write(f'Added district category: {category}')\n\n michigan, created = District.objects.get_or_create(\n category=state, name=\"Michigan\"\n )\n if created:\n self.stdout.write(f'Added district: {michigan}')\n", "sub_path": "elections/management/commands/migrate_data.py", "file_name": "migrate_data.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 8, "usage_type": "name"}, {"api_name": "log.init", "line_number": 12, "usage_type": "call"}, {"api_name": "elections.models.Party.objects.update_or_create", "line_number": 31, "usage_type": "call"}, {"api_name": "elections.models.Party.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "elections.models.Party", "line_number": 31, "usage_type": "name"}, {"api_name": "elections.models.DistrictCategory.objects.get_or_create", "line_number": 38, "usage_type": "call"}, {"api_name": "elections.models.DistrictCategory.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "elections.models.DistrictCategory", "line_number": 38, "usage_type": "name"}, {"api_name": "elections.models.DistrictCategory.objects.get_or_create", "line_number": 58, "usage_type": "call"}, {"api_name": "elections.models.DistrictCategory.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "elections.models.DistrictCategory", "line_number": 58, "usage_type": "name"}, {"api_name": "elections.models.District.objects.get_or_create", "line_number": 62, "usage_type": "call"}, {"api_name": "elections.models.District.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "elections.models.District", "line_number": 62, "usage_type": "name"}]}
+{"seq_id": "134713025", "text": "import os\nimport requests\nimport datetime as dt\nfrom twilio.rest import Client\n\n\nSTOCK = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\nSTOCK_FLUCTUATION_VALUE = 0.5 # %\nDATE_YESTERDAY = dt.date.today() - dt.timedelta(1)\nDATE_DAY_BEFORE_YESTERDAY = dt.date.today() - dt.timedelta(2)\n# API keys\nSTOCK_API_KEY = os.environ.get(\"STOCK_API_KEY\")\nNEWS_API_KEY = os.environ.get(\"NEWS_API_KEY\")\nSMS_API_KEY = os.environ.get(\"TWILIO_API_KEY\")\n# Twilio settings\nTWILIO_ACCOUNT_SID = os.environ.get(\"TWILIO_ACCOUNT_SID\")\nTWILIO_AUTH_TOKEN = os.environ.get(\"TWILIO_AUTH_TOKEN\")\n\n\n## STEP 1: Use https://www.alphavantage.co\n# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print(\"Get News\").\ndef get_stock_price_change():\n\n parameters = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": STOCK,\n \"apikey\": STOCK_API_KEY,\n }\n\n response = requests.get(url=\"https://www.alphavantage.co/query\", params=parameters)\n response.raise_for_status()\n content = response.json()[\"Time Series (Daily)\"]\n diference = float(content[str(DATE_YESTERDAY)][\"4. close\"]) - float(\n content[str(DATE_DAY_BEFORE_YESTERDAY)][\"4. close\"]\n )\n variation = (diference / float(content[str(DATE_YESTERDAY)][\"4. close\"])) * 100\n return round(variation, 2)\n\n\n## STEP 2: Use https://newsapi.org\n# Instead of printing (\"Get News\"), actually get the first 3 news pieces for the COMPANY_NAME.\ndef get_company_news():\n\n parameters = {\n \"q\": COMPANY_NAME,\n \"from\": DATE_YESTERDAY,\n \"sortBy\": \"popularity\",\n \"apikey\": NEWS_API_KEY,\n }\n\n response = requests.get(url=\"https://newsapi.org/v2/everything\", params=parameters)\n response.raise_for_status()\n content = response.json()[\"articles\"]\n return f\"Headline: {content[0]['title']}\\nBrief: {content[0]['description']}\\nLink: {content[0]['url']}\"\n\n\n## STEP 3: Use https://www.twilio.com\n# Send a seperate message with the percentage change and each article's title and description to your phone number.\ndef send_sms(stock_fluctuation, news):\n\n if stock_fluctuation > 0:\n content = f\"{STOCK}: ⬆️{abs(stock_fluctuation)}%\\n{news}\"\n else:\n content = f\"{STOCK}: ⬇️{abs(stock_fluctuation)}%\\n{news}\"\n\n client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)\n message = client.messages.create(\n body=content,\n from_=\"\", # Twilio free trial phone number\n to=\"\", # Your phone number, that was used in twilio\n )\n print(message.sid)\n\n\nif __name__ == \"__main__\":\n stock_fluctuation = get_stock_price_change()\n print(stock_fluctuation)\n if abs(stock_fluctuation) >= STOCK_FLUCTUATION_VALUE:\n news = get_company_news()\n print(news)\n send_sms(stock_fluctuation, news)\n", "sub_path": "python/100_Days_of_Code/Intermediate+/day36/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.date.today", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, 
{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "twilio.rest.Client", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "310271753", "text": "from django.urls import path\nfrom . import views\n\napp_name = \"blog\"\n\nurlpatterns = [\n path('', views.index_view, name=\"index\"),\n path('new_post/', views.new_post_view, name='new_post'),\n path('list/', views.PostListView.as_view(), name='list'),\n path('list//', views.PostDetailView.as_view(), name='read_post'),\n path('create/', views.PostCreateView.as_view(), name='create'),\n path('update//', views.PostUpdateView.as_view(), name='update'),\n path('delete//', views.PostDeleteView.as_view(), name='delete'),\n]\n", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "53710815", "text": "import re\r\n\r\nfrom django.db.transaction import set_autocommit, rollback, commit\r\n\r\nfrom account.models import City, School, Tag, Profile, ProfileHighSchoolInfo, \\\r\n ProfileCollegeInfo, ProfileTagVote, ContactRecord\r\n\r\n\r\ndef bulk_create(file):\r\n from openpyxl import load_workbook\r\n \r\n wb = load_workbook(file, use_iterators = True)\r\n ws = wb.worksheets[0]\r\n row_counter = 1\r\n \r\n city_dict = dict([(city.name, city) for city in City.objects.all()])\r\n high_school_dict = dict([(s.name, s) for s in School.objects.filter(type=School.HIGH_SCHOOL_TYPE_INDEX)])\r\n college_dict = dict([(s.name, s) for s in School.objects.filter(type=School.COLLEGE_TYPE_INDEX)])\r\n tag_dict = dict([(t.name, t) for t in Tag.objects.all()])\r\n num_extract_regex = re.compile('(?<=[^\\d])\\d+(?=[^\\d])')\r\n mobile_profile_dict = {}\r\n index_profile_dict = {}\r\n raw_encrypted_dict = {}\r\n def set_mobile_field(profile, raw_mobile_num, mobile_field_name): \r\n if raw_mobile_num:\r\n profile.set_mobile(raw_mobile_num, mobile_field_name)\r\n raw_encrypted_dict[raw_mobile_num] = getattr(profile, mobile_field_name)\r\n \r\n def save_profile_dict(profile, mobile_field_name, mobile_num):\r\n if getattr(profile, mobile_field_name):\r\n mobile_profile_dict[mobile_num] = profile\r\n\r\n set_autocommit(False)\r\n for row in ws.iter_rows(row_offset=1): # it brings a new method: iter_rows()\r\n row_counter = row_counter + 1\r\n try:\r\n p = Profile()\r\n set_mobile_field(p, row[0].value, Profile.MOBBILE0_FIELD)\r\n set_mobile_field(p, row[1].value, Profile.MOBBILE1_FIELD)\r\n set_mobile_field(p, row[2].value, Profile.MOBBILE2_FIELD)\r\n p.date_of_birth = row[3].value\r\n p.save()\r\n save_profile_dict(p, Profile.MOBBILE0_FIELD, row[0].value)\r\n save_profile_dict(p, Profile.MOBBILE1_FIELD, row[1].value)\r\n save_profile_dict(p, Profile.MOBBILE2_FIELD, row[2].value)\r\n index_profile_dict[row_counter] = p\r\n if city_dict.get(row[4].value, None):\r\n p.cities.add(city_dict.get(row[4].value))\r\n if city_dict.get(row[5].value, None):\r\n p.cities.add(city_dict.get(row[5].value))\r\n if high_school_dict.get(row[6].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': high_school_dict.get(row[6].value),\r\n 'date_joined': row[7].value,\r\n 'date_graduated': row[8].value,\r\n }\r\n phsi = ProfileHighSchoolInfo(**kwarg)\r\n phsi.save()\r\n if high_school_dict.get(row[9].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': high_school_dict.get(row[9].value),\r\n 'date_joined': row[10].value,\r\n 'date_graduated': row[11].value,\r\n }\r\n phsi = ProfileHighSchoolInfo(**kwarg)\r\n phsi.save()\r\n if college_dict.get(row[12].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': college_dict.get(row[12].value),\r\n 'date_joined': row[13].value,\r\n 'date_graduated': row[14].value,\r\n }\r\n pci = ProfileCollegeInfo(**kwarg)\r\n pci.save()\r\n if college_dict.get(row[15].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': college_dict.get(row[15].value),\r\n 'date_joined': row[16].value,\r\n 'date_graduated': row[17].value,\r\n }\r\n pci = ProfileCollegeInfo(**kwarg)\r\n pci.save()\r\n if college_dict.get(row[18].value, None):\r\n kwarg = {\r\n 'profile': p,\r\n 'school': college_dict.get(row[18].value),\r\n 'date_joined': row[19].value,\r\n 'date_graduated': row[20].value,\r\n }\r\n pci = ProfileCollegeInfo(**kwarg)\r\n pci.save()\r\n \r\n tags = re.split('[,锛� ]+', row[21].value)\r\n for tag in tags:\r\n tag = tag.strip()\r\n if not tag:\r\n 
continue\r\n vote = num_extract_regex.findall(tag)\r\n vote = int(vote[0]) if len(vote) else 0\r\n tag = re.split('[\\(锛� ]+', tag)[0]\r\n if tag_dict.get(tag, None):\r\n ptv = ProfileTagVote(profile=p, tag=tag_dict.get(tag), count=vote)\r\n ptv.save()\r\n except Exception as e:\r\n e.row_counter = row_counter\r\n rollback()\r\n raise e\r\n \r\n row_counter = 1\r\n for row in ws.iter_rows(row_offset=1): # it brings a new method: iter_rows()\r\n row_counter = row_counter + 1\r\n try:\r\n mobile_nums = re.split('[,锛� ]+', row[22].value)\r\n for mobile_num in mobile_nums:\r\n mobile_num = mobile_num.strip()\r\n if mobile_profile_dict.get(mobile_num):\r\n ContactRecord(from_profile=index_profile_dict.get(row_counter),\r\n to_profile=mobile_profile_dict.get(mobile_num),\r\n encrypted_mobile=raw_encrypted_dict.get(mobile_num)).save()\r\n except Exception as e:\r\n e.row_counter = row_counter\r\n rollback()\r\n raise e\r\n \r\n commit()\r\n return row_counter\r\n", "sub_path": "account/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 12, "usage_type": "call"}, {"api_name": "account.models.City.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "account.models.City.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "account.models.City", "line_number": 16, "usage_type": "name"}, {"api_name": "account.models.School.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "account.models.School.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "account.models.School", "line_number": 17, "usage_type": "name"}, {"api_name": "account.models.School.HIGH_SCHOOL_TYPE_INDEX", "line_number": 17, "usage_type": "attribute"}, {"api_name": "account.models.School.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "account.models.School.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "account.models.School", "line_number": 18, "usage_type": "name"}, {"api_name": "account.models.School.COLLEGE_TYPE_INDEX", "line_number": 18, "usage_type": "attribute"}, {"api_name": "account.models.Tag.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "account.models.Tag.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "account.models.Tag", "line_number": 19, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.transaction.set_autocommit", "line_number": 33, "usage_type": "call"}, {"api_name": "account.models.Profile", "line_number": 37, "usage_type": "call"}, {"api_name": "account.models.Profile.MOBBILE0_FIELD", "line_number": 38, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 38, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE1_FIELD", "line_number": 39, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 39, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE2_FIELD", "line_number": 40, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 40, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE0_FIELD", "line_number": 43, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 43, "usage_type": "name"}, {"api_name": 
"account.models.Profile.MOBBILE1_FIELD", "line_number": 44, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 44, "usage_type": "name"}, {"api_name": "account.models.Profile.MOBBILE2_FIELD", "line_number": 45, "usage_type": "attribute"}, {"api_name": "account.models.Profile", "line_number": 45, "usage_type": "name"}, {"api_name": "account.models.ProfileHighSchoolInfo", "line_number": 58, "usage_type": "call"}, {"api_name": "account.models.ProfileHighSchoolInfo", "line_number": 67, "usage_type": "call"}, {"api_name": "account.models.ProfileCollegeInfo", "line_number": 76, "usage_type": "call"}, {"api_name": "account.models.ProfileCollegeInfo", "line_number": 85, "usage_type": "call"}, {"api_name": "account.models.ProfileCollegeInfo", "line_number": 94, "usage_type": "call"}, {"api_name": "re.split", "line_number": 97, "usage_type": "call"}, {"api_name": "re.split", "line_number": 104, "usage_type": "call"}, {"api_name": "account.models.ProfileTagVote", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.transaction.rollback", "line_number": 110, "usage_type": "call"}, {"api_name": "re.split", "line_number": 117, "usage_type": "call"}, {"api_name": "account.models.ContactRecord", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.transaction.rollback", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.transaction.commit", "line_number": 129, "usage_type": "call"}]}
+{"seq_id": "114313541", "text": "from threading import Thread\nfrom threading import Lock\nfrom colorlog import ColoredFormatter\nimport logging\n\n\nformatter = ColoredFormatter(\n\t'%(log_color)s[%(asctime)-8s] %(module)s (%(process)d %(threadName)s): %(message_log_color)s%(message)s',\n\tdatefmt=None,\n\treset=True,\n\tlog_colors={\n\t\t'DEBUG': 'blue',\n\t\t'INFO': 'green',\n\t\t'WARNING': 'yellow',\n\t\t'ERROR': 'red',\n\t\t'CRITICAL': 'red',\n\t},\n\tsecondary_log_colors={\n\t\t'message': {\n\t\t\t'DEBUG': 'purple',\n\t\t\t'INFO': 'yellow',\n\t\t\t'WARNING': 'green',\n\t\t\t'ERROR': 'yellow',\n\t\t\t'CRITICAL': 'red',\n\t\t}\n\t},\n\tstyle = '%'\n)\n\nstream = logging.StreamHandler()\nstream.setFormatter(formatter)\n\nlogger = logging.getLogger('pastry.py')\nlogger.addHandler(stream)\nlogger.setLevel (logging.DEBUG)\n\n\nclass Node:\n\tdef __init__ (self, port, auth):\n\t\tself.port = port\n\t\tself.auth = auth\n\t\tlogger.debug ('listening on port %d', self.port)\n\n\tdef bootstrap (self, seeds = []):\n\t\tfor seed in seeds:\n\t\t\tlogger.debug ('bootstraping from %s', str (seed))\n\n\tdef get (self, key):\n\t\tpass\n\n\tdef set (self, key, data):\n\t\tpass\n\n\n\t\n\t\n", "sub_path": "pastry/node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 1071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "colorlog.ColoredFormatter", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "attribute"}]}
+{"seq_id": "73944803", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 27 21:02:50 2018\r\n\r\n@author: CS\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nimport numpy as np\r\nnp.random.seed(1337) # for reproducibility\r\n\r\nfrom keras.datasets import mnist\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation\r\nfrom keras.optimizers import SGD, Adam, RMSprop\r\nfrom keras.utils import np_utils\r\nfrom keras.callbacks import ModelCheckpoint\r\nimport tensorflow as tf \r\nfrom keras import backend as k\r\n\r\n'''def categorical_hinge(y_true, y_pred):\r\n pos = k.sum(y_true * y_pred, axis=-1)\r\n neg =k.max((1.0 - y_true) * y_pred, axis=-1)\r\n return k.mean(k.maximum(0.0, neg - pos + 1), axis=-1)\r\n'''\r\n\r\n\r\n# Data Preparing\r\n\r\nbatch_size = 128\r\nnr_classes = 10 #62\r\nnr_iterations = 100\r\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\r\n\r\nX_train = X_train.reshape(60000, 784) #Done\r\nX_test = X_test.reshape(10000, 784) #Done\r\nX_train = X_train.astype('float32') #Done\r\nX_test = X_test.astype('float32') #Done\r\nX_train /= 255 #Done\r\nX_test /= 255 #Done\r\n\r\n\r\nY_train = np_utils.to_categorical(y_train, nr_classes) # ??\r\nY_test = np_utils.to_categorical(y_test, nr_classes) # ??\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(10, input_shape=(784,)))\r\n\r\nX_val=X_train[0:10000,:]\r\nY_val=Y_train[0:10000,:]\r\n\r\nX_train=X_train[10000:60000,:]\r\nY_train=Y_train[10000:60000,:]\r\n\r\n\r\nmodel.add(Activation('softmax'))\r\nmodel.summary()\r\nmodel.compile(loss='hinge',\r\n optimizer='sgd',\r\n metrics=['accuracy'])\r\n\r\nsaved_weights_name='SVMWeights.h5'\r\n\r\ncheckpoint = ModelCheckpoint(saved_weights_name, \r\n monitor='val_acc', \r\n verbose=1, \r\n save_best_only=True, \r\n mode='max')\r\n\r\nhistory = model.fit(X_train, Y_train,\r\n batch_size = batch_size, nb_epoch = nr_iterations,\r\n verbose = 1, validation_data = (X_val, Y_val) ,callbacks=[checkpoint])\r\n\r\nscore = model.evaluate(X_test, Y_test, verbose = 0)", "sub_path": "SVM.py", "file_name": "SVM.py", "file_ext": "py", "file_size_in_byte": 2101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "keras.datasets.mnist.load_data", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 33, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 44, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "215193213", "text": "#!/usr/bin/env python3\nfrom setuptools import setup\n\nwith open(\"README.md\", encoding='utf8') as readme:\n long_description = readme.read()\n\nsetup(\n name=\"HawkEye\",\n version=\"1.0\",\n author=\"Abdallah Elshinbary\",\n url=\"https://github.com/N1ght-W0lf/HawkEye\",\n description=(\"Malware dynamic instrumentation tool based on frida framework\"),\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"psutil\",\n \"frida\",\n ],\n packages=[\"hawkeye\",\n ],\n entry_points={\n \"console_scripts\": [\n \"hawkeye = hawkeye.HawkEye:main\",\n ],\n },\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}]}
+{"seq_id": "497120263", "text": "from django import forms\nfrom django.forms import SelectDateWidget\nfrom ipdb import set_trace\n\nfrom blog.models import Post\n\n\nclass CreatePostForm(forms.ModelForm):\n class Meta:\n model = Post\n fields = ('title', 'text', 'published_date')\n\n def __init__(self, user, *args, **kwargs):\n super().__init__(auto_id='custom_id_%s', *args, **kwargs)\n self.user = user\n self.fields['published_date'].widget = SelectDateWidget()\n self.fields['title'].widget = forms.TextInput(attrs={'placeholder': 'place title here'})\n self.fields['title'].initial = \"POST No: {}\".format(Post.objects.count() + 1)\n\n def save(self, commit=True):\n post = self.instance\n post.author = self.user\n super().save()\n\n\nclass UpdatePostForm(forms.ModelForm):\n\n class Meta:\n model = Post\n fields = ('title', 'text', 'published_date')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['published_date'].widget = SelectDateWidget()\n\n\n\nfrom blog.signals import *\n", "sub_path": "blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "blog.models.Post", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.SelectDateWidget", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms.TextInput", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "blog.models.Post.objects.count", "line_number": 18, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "blog.models.Post", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.SelectDateWidget", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "401234362", "text": "#!/usr/bin/python\nimport argparse\nimport json\nimport web3\nimport sys\nimport logging\n# import pymongo\nimport progressbar\n\nfrom pymongo import MongoClient\nfrom bson import Decimal128\n\nfrom mnemonic import Mnemonic\n\nfrom datetime import datetime\nfrom web3 import Web3\nfrom hexbytes import HexBytes\n\nfrom helper import query_yes_no\n\nlogging.basicConfig(level=logging.INFO)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-d', '--database', type=str, help='Name of the MongoDB database', required=True)\nparser.add_argument('-s', '--start-block', type=int, help='Start block')\nparser.add_argument('-e', '--end-block', type=int, help='End block')\nparser.add_argument('--drop', action='store_true', help='Drop existing DB before scraping')\nparser.add_argument('--skip-confirmation', action='store_true', help='Skip asking for confirmation for dropping the DB')\n\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-a', '--addr', type=str, help='Comma-separated list of addresses from and to which txs will be filtered')\ngroup.add_argument('-f', '--file', type=str, help='File containing addresses from and to which txs will be filtered')\n\n\ndef tx_to_dict(tx):\n result = {}\n for key, val in tx.items():\n if isinstance(val, HexBytes):\n result[key] = val.hex()\n else:\n result[key] = val\n\n if 'value' in result: result['value'] = Decimal128(str(result['value']))\n if 'gasPrice' in result: result['gasPrice'] = Decimal128(str(result['gasPrice']))\n\n return result\n\ndef block_to_dict(tx):\n result = {}\n for key, val in tx.items():\n if isinstance(val, HexBytes):\n result[key] = val.hex()\n else:\n result[key] = val\n\n if 'difficulty' in result: result['difficulty'] = Decimal128(str(result['difficulty']))\n if 'totalDifficulty' in result: result['totalDifficulty'] = Decimal128(str(result['totalDifficulty']))\n\n return result\n\n\ndef __main__():\n args = parser.parse_args()\n\n provider = Web3.WebsocketProvider('wss://mainnet.infura.io/ws/')\n # provider = Web3.HTTPProvider('https://mainnet.infura.io/')\n # provider = Web3.IPCProvider()\n w3 = Web3(provider)\n\n if args.start_block:\n start_block = args.start_block\n else:\n start_block = 0\n\n if args.end_block:\n end_block = args.end_block\n else:\n end_block = w3.eth.blockNumber\n\n client = MongoClient()\n\n dbnames = client.list_database_names()\n\n if args.drop and args.database in dbnames:\n if not args.skip_confirmation:\n if not query_yes_no('Are you sure you want to drop existing DB: '+args.database, default='no'):\n sys.exit()\n\n client.drop_database(args.database)\n\n db = client[args.database]\n\n block_collection = db['blocks']\n tx_collection = db['transactions']\n txreceipt_collection = db['txreceipts']\n\n filtered_addrs = []\n if args.addr:\n filtered_addrs += args.addr.split(',')\n elif args.file:\n filtered_addrs += open(args.file, 'r').read().split('\\n')\n\n filtered_addrs = [i.lower() for i in filtered_addrs if Web3.isAddress(i)]\n\n bar = progressbar.ProgressBar(max_value=end_block-start_block)\n\n tx_count = 0\n\n for idx in range(start_block, end_block+1):\n bar.update(idx-start_block)\n\n block = w3.eth.getBlock(idx, full_transactions=True)\n\n block_without_tx = block_to_dict(block)\n if 'transactions' in block_without_tx:\n del block_without_tx['transactions']\n\n block_collection.insert_one(block_without_tx)\n\n txs = block.transactions\n\n lines = []\n\n for n, tx in enumerate(txs):\n if tx['to']:\n to_matches = tx['to'].lower() in 
filtered_addrs\n else:\n to_matches = False\n\n if tx['from']:\n from_matches = tx['from'].lower() in filtered_addrs\n else:\n from_matches = False\n\n if to_matches or from_matches or filtered_addrs == []:\n # print('Found tx: %s'%tx['hash'].hex())\n\n tx_collection.insert_one(tx_to_dict(tx))\n\n tx_receipt = w3.eth.getTransactionReceipt(tx['hash'])\n txreceipt_collection.insert_one(tx_to_dict(tx_receipt))\n\n tx_count += 1\n\n bar.finish()\n txreceipt_collection.create_index('transactionHash')\n\n logging.info('Finished importing %d txs from %d blocks'%(tx_count, end_block-start_block))\n\n # if len(lines) > 0:\n # if args.readable:\n # ofile.write('// Block %d at %s including %d txs, %d unique addresses, diversity: %d%%, gas used: %d\\n'%(block.number, datetime.fromtimestamp(block.timestamp), len(block.transactions), len(unique_addresses), diversity*100, block.gasUsed))\n\n\nif __name__ == '__main__':\n __main__()\n", "sub_path": "scrape_txs_node.py", "file_name": "scrape_txs_node.py", "file_ext": "py", "file_size_in_byte": 4819, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "hexbytes.HexBytes", "line_number": 38, "usage_type": "argument"}, {"api_name": "bson.Decimal128", "line_number": 43, "usage_type": "call"}, {"api_name": "bson.Decimal128", "line_number": 44, "usage_type": "call"}, {"api_name": "hexbytes.HexBytes", "line_number": 51, "usage_type": "argument"}, {"api_name": "bson.Decimal128", "line_number": 56, "usage_type": "call"}, {"api_name": "bson.Decimal128", "line_number": 57, "usage_type": "call"}, {"api_name": "web3.Web3.WebsocketProvider", "line_number": 65, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 65, "usage_type": "name"}, {"api_name": "web3.Web3", "line_number": 68, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 80, "usage_type": "call"}, {"api_name": "helper.query_yes_no", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 87, "usage_type": "call"}, {"api_name": "web3.Web3.isAddress", "line_number": 103, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 103, "usage_type": "name"}, {"api_name": "progressbar.ProgressBar", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 148, "usage_type": "call"}]}
+{"seq_id": "195318413", "text": "import luigi\n\nfrom ...abstract_method_exception import AbstractMethodException\nfrom ...lib.test_environment.populate_data import PopulateEngineSmallTestDataToDatabase\nfrom ...lib.test_environment.upload_exa_jdbc import UploadExaJDBC\nfrom ...lib.test_environment.upload_virtual_schema_jdbc_adapter import UploadVirtualSchemaJDBCAdapter\nfrom ...lib.base.dependency_logger_base_task import DependencyLoggerBaseTask\nfrom ...lib.data.container_info import ContainerInfo\nfrom ...lib.data.database_credentials import DatabaseCredentialsParameter\nfrom ...lib.data.database_info import DatabaseInfo\nfrom ...lib.data.docker_network_info import DockerNetworkInfo\nfrom ...lib.data.environment_info import EnvironmentInfo\nfrom ...lib.test_environment.general_spawn_test_environment_parameter import \\\n GeneralSpawnTestEnvironmentParameter\nfrom ...lib.test_environment.spawn_test_container import SpawnTestContainer\n\nDATABASE = \"database\"\n\nTEST_CONTAINER = \"test_container\"\n\n\nclass AbstractSpawnTestEnvironment(DependencyLoggerBaseTask,\n GeneralSpawnTestEnvironmentParameter,\n DatabaseCredentialsParameter):\n environment_name = luigi.Parameter()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.test_container_name = f\"\"\"test_container_{self.environment_name}\"\"\"\n self.network_name = f\"\"\"db_network_{self.environment_name}\"\"\"\n\n def get_environment_type(self):\n raise AbstractMethodException()\n\n def run_task(self):\n test_environment_info = yield from self._attempt_database_start()\n yield from self._setup_test_database(test_environment_info)\n self.return_object(test_environment_info)\n\n def _attempt_database_start(self):\n is_database_ready = False\n attempt = 0\n database_info = None\n test_container_info = None\n while not is_database_ready and attempt < self.max_start_attempts:\n network_info, database_info, is_database_ready, test_container_info = \\\n yield from self._start_database(attempt)\n attempt += 1\n if not is_database_ready and not attempt < self.max_start_attempts:\n raise Exception(f\"Maximum attempts {attempt} to start the database reached.\")\n test_environment_info = \\\n EnvironmentInfo(name=self.environment_name,\n env_type=self.get_environment_type(),\n database_info=database_info,\n test_container_info=test_container_info,\n network_info=network_info)\n return test_environment_info\n\n def _start_database(self, attempt):\n network_info = yield from self._create_network(attempt)\n database_info, test_container_info = \\\n yield from self._spawn_database_and_test_container(network_info, attempt)\n is_database_ready = yield from self._wait_for_database(\n database_info, test_container_info, attempt)\n return network_info, database_info, is_database_ready, test_container_info\n\n def _create_network(self, attempt):\n network_info_future = yield from self.run_dependencies(self.create_network_task(attempt))\n network_info = self.get_values_from_future(network_info_future)\n return network_info\n\n def create_network_task(self, attempt: int):\n raise AbstractMethodException()\n\n def _spawn_database_and_test_container(self,\n network_info: DockerNetworkInfo,\n attempt: int):\n database_and_test_container_info_future = \\\n yield from self.run_dependencies({\n TEST_CONTAINER: SpawnTestContainer(\n environment_name=self.environment_name,\n test_container_name=self.test_container_name,\n network_info=network_info,\n ip_address_index_in_subnet=1,\n reuse_test_container=self.reuse_test_container,\n 
no_test_container_cleanup_after_end=self.no_test_container_cleanup_after_end,\n attempt=attempt),\n DATABASE: self.create_spawn_database_task(network_info, attempt)\n })\n database_and_test_container_info = \\\n self.get_values_from_futures(database_and_test_container_info_future)\n test_container_info = database_and_test_container_info[TEST_CONTAINER]\n database_info = database_and_test_container_info[DATABASE]\n return database_info, test_container_info\n\n def create_spawn_database_task(self,\n network_info: DockerNetworkInfo,\n attempt: int):\n raise AbstractMethodException()\n\n def _wait_for_database(self,\n database_info: DatabaseInfo,\n test_container_info: ContainerInfo,\n attempt: int):\n database_ready_target_future = \\\n yield from self.run_dependencies(\n self.create_wait_for_database_task(\n attempt, database_info, test_container_info))\n is_database_ready = self.get_values_from_futures(database_ready_target_future)\n return is_database_ready\n\n def create_wait_for_database_task(self,\n attempt: int,\n database_info: DatabaseInfo,\n test_container_info: ContainerInfo):\n raise AbstractMethodException()\n\n def _setup_test_database(self, test_environment_info: EnvironmentInfo):\n # TODO check if database is setup\n if self.is_setup_database_activated:\n self.logger.info(\"Setup database\")\n upload_tasks = [\n self.create_child_task_with_common_params(\n UploadExaJDBC,\n test_environment_info=test_environment_info,\n reuse_uploaded=self.reuse_database_setup),\n self.create_child_task_with_common_params(\n UploadVirtualSchemaJDBCAdapter,\n test_environment_info=test_environment_info,\n reuse_uploaded=self.reuse_database_setup),\n self.create_child_task_with_common_params(\n PopulateEngineSmallTestDataToDatabase,\n test_environment_info=test_environment_info,\n reuse_data=self.reuse_database_setup\n )]\n yield from self.run_dependencies(upload_tasks)\n", "sub_path": "src/lib/test_environment/abstract_spawn_test_environment.py", "file_name": "abstract_spawn_test_environment.py", "file_ext": "py", "file_size_in_byte": 6644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "lib.base.dependency_logger_base_task.DependencyLoggerBaseTask", "line_number": 22, "usage_type": "name"}, {"api_name": "lib.test_environment.general_spawn_test_environment_parameter.GeneralSpawnTestEnvironmentParameter", "line_number": 23, "usage_type": "name"}, {"api_name": "lib.data.database_credentials.DatabaseCredentialsParameter", "line_number": 24, "usage_type": "name"}, {"api_name": "luigi.Parameter", "line_number": 25, "usage_type": "call"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.data.environment_info.EnvironmentInfo", "line_number": 52, "usage_type": "call"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 73, "usage_type": "call"}, {"api_name": "lib.data.docker_network_info.DockerNetworkInfo", "line_number": 76, "usage_type": "name"}, {"api_name": "lib.test_environment.spawn_test_container.SpawnTestContainer", "line_number": 80, "usage_type": "call"}, {"api_name": "lib.data.docker_network_info.DockerNetworkInfo", "line_number": 97, "usage_type": "name"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 99, "usage_type": "call"}, {"api_name": "lib.data.database_info.DatabaseInfo", "line_number": 102, "usage_type": "name"}, {"api_name": "lib.data.container_info.ContainerInfo", 
"line_number": 103, "usage_type": "name"}, {"api_name": "lib.data.database_info.DatabaseInfo", "line_number": 114, "usage_type": "name"}, {"api_name": "lib.data.container_info.ContainerInfo", "line_number": 115, "usage_type": "name"}, {"api_name": "abstract_method_exception.AbstractMethodException", "line_number": 116, "usage_type": "call"}, {"api_name": "lib.data.environment_info.EnvironmentInfo", "line_number": 118, "usage_type": "name"}, {"api_name": "lib.test_environment.upload_exa_jdbc.UploadExaJDBC", "line_number": 124, "usage_type": "argument"}, {"api_name": "lib.test_environment.upload_virtual_schema_jdbc_adapter.UploadVirtualSchemaJDBCAdapter", "line_number": 128, "usage_type": "argument"}, {"api_name": "lib.test_environment.populate_data.PopulateEngineSmallTestDataToDatabase", "line_number": 132, "usage_type": "argument"}]}
+{"seq_id": "171826920", "text": "import os\nimport unittest\n\nfrom google.appengine.ext import testbed\n\n\nclass EnvVarsTestCase(unittest.TestCase):\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.setup_env(\n app_id='your-app-id',\n my_config_setting='example',\n overwrite=True)\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def testEnvVars(self):\n assert os.environ['APPLICATION_ID'] == 'your-app-id'\n assert os.environ['MY_CONFIG_SETTING'] == 'example'\n\nif __name__ == '__main__':\n unittest.main()", "sub_path": "localtesting/test_env_vars.py", "file_name": "test_env_vars.py", "file_ext": "py", "file_size_in_byte": 596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.testbed.Testbed", "line_number": 9, "usage_type": "call"}, {"api_name": "google.appengine.ext.testbed", "line_number": 9, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "437665020", "text": "\"\"\"\nFlo Smart Home Water Control System for Home Assistant\nSee https://github.com/rsnodgrass/hass-flo-water\n\nFor good example of update, see Leaf sensor/switch:\nhttps://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/nissan_leaf/__init__.py\n\"\"\"\nimport logging\nimport json\nimport requests\nimport time\nimport datetime\nimport voluptuous as vol\nfrom requests.exceptions import HTTPError, ConnectTimeout\n\nfrom homeassistant.helpers import discovery\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.const import (\n CONF_USERNAME, CONF_PASSWORD, CONF_NAME, CONF_SCAN_INTERVAL)\nimport homeassistant.helpers.config_validation as cv\n\nfrom pyflowater import PyFlo\n\nLOG = logging.getLogger(__name__)\n\nFLO_DOMAIN = 'flo'\nFLO_SERVICE = 'flo_service'\n\nNOTIFICATION_ID = 'flo_notification'\n\nCONF_AUTO_DISCOVER = 'discovery'\nCONF_LOCATION_ID = 'location_id'\nCONF_STARTDATE = 'startdate'\n\nCONFIG_SCHEMA = vol.Schema({\n FLO_DOMAIN: vol.Schema({\n vol.Required(CONF_USERNAME): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n # location_id: [ , , ... ]\n vol.Optional(CONF_LOCATION_ID): cv.ensure_list,\n vol.Optional(CONF_STARTDATE): cv.string\n })\n}, extra=vol.ALLOW_EXTRA)\n\n# cache expiry in minutes; TODO: make this configurable (with a minimum to prevent DDoS)\nFLO_CACHE_EXPIRY = 10\n\n\ndef setup(hass, config):\n \"\"\"Set up the Flo Water Control System\"\"\"\n\n conf = config[FLO_DOMAIN]\n username = conf.get(CONF_USERNAME)\n password = conf.get(CONF_PASSWORD)\n\n try:\n flo = PyFlo(username, password)\n if not flo.is_connected:\n LOG.error(f\"Could not connect to Flo service with user {username}\")\n return False\n\n # save password to enable automatic re-authentication while this HA instance is running\n flo.save_password(password)\n\n hass.data[FLO_SERVICE] = flo\n\n except (ConnectTimeout, HTTPError) as ex:\n LOG.error(f\"Unable to connect to Flo service: {str(ex)}\")\n hass.components.persistent_notification.create(\n f\"Error: {ex}
You will need to restart Home Assistant after fixing.\",\n title='Flo', notification_id=NOTIFICATION_ID\n )\n return False\n\n location_ids = conf.get(CONF_LOCATION_ID)\n startdate = conf.get(CONF_STARTDATE)\n\n # if no location is specified, this will auto discover ALL Flo locations/devices and add them to Home Assistant\n if location_ids == None:\n location_ids = []\n for location in flo.locations():\n location_ids.append(location['id'])\n LOG.info(\n f\"Discovered Flo location {location['id']} ({location['nickname']})\")\n\n # create sensors/switches for all configured locations\n for location_id in location_ids:\n discovery_info = {CONF_LOCATION_ID: location_id,\n CONF_STARTDATE: startdate}\n for component in ['switch', 'binary_sensor', 'sensor']:\n discovery.load_platform(\n hass, component, FLO_DOMAIN, discovery_info, config)\n\n return True\n\n\nclass FloEntity(Entity):\n \"\"\"Base Entity class for Flo water inflow control device\"\"\"\n\n def __init__(self, hass, device_id):\n \"\"\"Store service upon init.\"\"\"\n self._hass = hass\n self._flo = hass.data[FLO_SERVICE]\n self._device_id = device_id\n self._attrs = {\n 'device_id': device_id\n }\n\n @property\n def name(self):\n \"\"\"Return the display name for this sensor\"\"\"\n return self._name\n\n @property\n def should_poll(self):\n \"\"\"A coordinate manually updates all the sensors, so ensure polling ON for HA to detect state changes!\"\"\"\n # FIXME: we could make these dependent sensors not be polling, since the coordinator could let HA know what changes\n return True\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the device state attributes.\"\"\"\n return self._attrs\n\n @property\n def device_key(self):\n return f\"flo_device_{self._device_id}\"\n\n @property\n def device_state(self):\n return self._hass.data.get(self.device_key)\n\n def get_telemetry(self, field):\n if self.device_state:\n telemetry = self.device_state['telemetry']\n current_states = telemetry['current']\n return current_states[field]\n else:\n return None\n\n def update_state(self, state):\n self._state = state\n\n # For debugging, mark the attribute with current timestamp to indicate updated\n if self._attrs:\n now = datetime.datetime.now()\n self._attrs['last_updated'] = now.strftime(\"%m/%d/%Y %H:%M:%S\")\n\n\n\n\n\n async def refresh(self) -> bool:\n \"\"\"Refresh ecobee tokens and update config entry.\"\"\"\n _LOGGER.debug(\"Refreshing ecobee tokens and updating config entry\")\n result = await self._hass.async_add_executor_job(self.ecobee.refresh_tokens)\n if result == True:\n self._hass.config_entries.async_update_entry(\n self._entry,\n data={\n CONF_API_KEY: self.ecobee.config[ECOBEE_API_KEY],\n CONF_REFRESH_TOKEN: self.ecobee.config[ECOBEE_REFRESH_TOKEN],\n },\n )\n return True\n _LOGGER.error(\"Error refreshing ecobee tokens\")\n return False", "sub_path": "custom_components/flo/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 5425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 35, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 36, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 37, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_USERNAME", "line_number": 37, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 38, 
"usage_type": "call"}, {"api_name": "homeassistant.const.CONF_PASSWORD", "line_number": 38, "usage_type": "argument"}, {"api_name": "voluptuous.Optional", "line_number": 40, "usage_type": "call"}, {"api_name": "voluptuous.Optional", "line_number": 41, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 37, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 37, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 38, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 38, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.ensure_list", "line_number": 40, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 40, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 41, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 41, "usage_type": "name"}, {"api_name": "voluptuous.ALLOW_EXTRA", "line_number": 43, "usage_type": "attribute"}, {"api_name": "homeassistant.const.CONF_USERNAME", "line_number": 53, "usage_type": "argument"}, {"api_name": "homeassistant.const.CONF_PASSWORD", "line_number": 54, "usage_type": "argument"}, {"api_name": "pyflowater.PyFlo", "line_number": 57, "usage_type": "call"}, {"api_name": "requests.exceptions.ConnectTimeout", "line_number": 67, "usage_type": "name"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 67, "usage_type": "name"}, {"api_name": "homeassistant.helpers.discovery.load_platform", "line_number": 91, "usage_type": "call"}, {"api_name": "homeassistant.helpers.discovery", "line_number": 91, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity.Entity", "line_number": 97, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "attribute"}]}
+{"seq_id": "49153410", "text": "import falcon\nfrom wtforms import Form, ValidationError\nfrom wtforms.ext.sqlalchemy.orm import model_form\n\nfrom ccm.appointments.models import Appointment\nfrom ccm.patients.models import Patient\n\nfrom ccm.settings import session\n\n\ndef validate_patient(form, field):\n patient = session.query(Patient).get(field.data)\n if not patient:\n raise ValidationError('Patient with id={id} not found'.format(id=field.data))\n\n\nAppointmentForm = model_form(\n Appointment,\n Form,\n field_args={\n 'patient_id': {\n 'validators': [\n validate_patient\n ]\n }\n },\n exclude_fk=False\n)\n\n\ndef validate_appointment_create(req, resp, resource, params):\n data = req.context.get('doc')\n appointment_form = AppointmentForm(data=data)\n\n if not appointment_form.validate():\n raise falcon.HTTPBadRequest(\n 'Form Validation Error',\n appointment_form.errors\n )\n", "sub_path": "ccm/appointments/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ccm.settings.session.query", "line_number": 12, "usage_type": "call"}, {"api_name": "ccm.patients.models.Patient", "line_number": 12, "usage_type": "argument"}, {"api_name": "ccm.settings.session", "line_number": 12, "usage_type": "name"}, {"api_name": "wtforms.ValidationError", "line_number": 14, "usage_type": "call"}, {"api_name": "wtforms.ext.sqlalchemy.orm.model_form", "line_number": 17, "usage_type": "call"}, {"api_name": "ccm.appointments.models.Appointment", "line_number": 18, "usage_type": "argument"}, {"api_name": "wtforms.Form", "line_number": 19, "usage_type": "argument"}, {"api_name": "falcon.HTTPBadRequest", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "320423893", "text": "import json\nimport numpy as np\n\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.preprocessing import LabelEncoder\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.cross_validation import train_test_split\n\n# Data preparation\nDATA_DIR = \"./data\"\nwith open(DATA_DIR + '/train.json', encoding='utf8') as data_file:\n train_data = json.load(data_file)\n \nX = [x['ingredients'] for x in train_data]\nX = [dict(zip(x, np.ones(len(x)))) for x in X]\n\nv = DictVectorizer()\nX = v.fit_transform(X)\nfeature_names = np.array(v.feature_names_)\n\nle = LabelEncoder()\ny = [y['cuisine'] for y in train_data]\ny = le.fit_transform(y).astype(np.int32)\nlabel_names = le.classes_\n\n# KNN\n\n# finding the best number of neighbors - 17\nX_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.3,\n random_state=10)\n\n# n_neighbors = range(1, 51, 2)\n# scores = []\n# for n in n_neighbors:\n# clf = KNN(n)\n# clf.fit(X_train, y_train)\n# scores.append(clf.score(X_test, y_test))\n\n# plt.figure(figsize=(20, 8))\n# plt.plot(n_neighbors, scores, linewidth=3.0)\n# plt.show()\n# \nscores = []\nidx = np.random.uniform(0, 1, X.shape[0]) >= 0.3\nfor n in range(5):\n np.random.shuffle(idx)\n X_train, X_test = X[idx], X[idx == False]\n y_train, y_test = y[idx], y[idx == False]\n clf = KNN(17, weights='uniform')\n clf.fit(X_train, y_train)\n scores.append(clf.score(X_test, y_test))", "sub_path": "code/whats-cooking.py", "file_name": "whats-cooking.py", "file_ext": "py", "file_size_in_byte": 1632, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.DictVectorizer", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "590586549", "text": "from opengever.document.archival_file import ArchivalFileConverter\nfrom opengever.dossier.docprops import DocPropertyWriter\nfrom zope.lifecycleevent import IObjectRemovedEvent\n\n\nDISABLE_DOCPROPERTY_UPDATE_FLAG = 'disable_docproperty_update'\n\n\ndef checked_out(context, event):\n _update_docproperties(context)\n\n\ndef before_documend_checked_in(context, event):\n _update_docproperties(context)\n\n\ndef document_moved_or_added(context, event):\n if IObjectRemovedEvent.providedBy(event):\n return\n\n if context.REQUEST.get(DISABLE_DOCPROPERTY_UPDATE_FLAG):\n return\n\n _update_docproperties(context)\n\n\ndef _update_docproperties(document):\n DocPropertyWriter(document).update()\n\n\ndef set_archival_file_state(context, event):\n # Because every filewidget is always marked as changed, in the event\n # descriptions, even when no file has changed, we have to check the request\n request = context.REQUEST\n\n if request.get('ACTUAL_URL').endswith('edit_archival_file'):\n field_name = 'archival_file'\n else:\n field_name = 'IDocumentMetadata.archival_file'\n\n fileupload = request.get('form.widgets.{}'.format(field_name))\n action = request.get('form.widgets.{}.action'.format(field_name), '')\n\n if bool(fileupload):\n ArchivalFileConverter(context).handle_manual_file_upload()\n\n file_removed = action == u'remove'\n file_removed_in_archival_form = isinstance(action, list) and u'remove' in action\n\n if file_removed or file_removed_in_archival_form:\n ArchivalFileConverter(context).remove_state()\n", "sub_path": "opengever/document/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 1564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "zope.lifecycleevent.IObjectRemovedEvent.providedBy", "line_number": 18, "usage_type": "call"}, {"api_name": "zope.lifecycleevent.IObjectRemovedEvent", "line_number": 18, "usage_type": "name"}, {"api_name": "opengever.dossier.docprops.DocPropertyWriter", "line_number": 28, "usage_type": "call"}, {"api_name": "opengever.document.archival_file.ArchivalFileConverter", "line_number": 45, "usage_type": "call"}, {"api_name": "opengever.document.archival_file.ArchivalFileConverter", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "307120840", "text": "from django.conf.urls import url\nfrom taps_oan import views\n\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'about/$', views.about, name='about'),\n url(r'^add_pub/$', views.add_pub, name='add_pub'),\n url(r'^pub/(?P[\\w\\-]+)/$', \n views.show_pub, name='show_pub'),\n url(r'^pub/(?P[\\w\\-]+)/add_beer/$', \n views.add_beer, name='add_beer'),\n url(r'^beer/(?P[\\w\\-]+)/$', \n views.show_beer, name='show_beer'),\n url(r'^beer/(?P[\\w\\-]+)/add_carrier/$',\n views.add_carrier, name='add_carrier'),\n url(r'^register/$',\n views.register,name='register'),\n url(r'^login/$', \n views.user_login, name='login'),\n url(r'^logout/$', \n views.user_logout, name='logout'),\n url(r'^account/$',\n views.account, name='account'),\n url(r'^yelp/(?P[\\w\\-]+)/$', \n views.yelpLookUp, name='yelpLookUp'),\n\n]\n", "sub_path": "taps_oan/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "taps_oan.views.index", "line_number": 6, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "taps_oan.views.about", "line_number": 7, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "taps_oan.views.add_pub", "line_number": 8, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "taps_oan.views.show_pub", "line_number": 10, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "taps_oan.views.add_beer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "taps_oan.views.show_beer", "line_number": 14, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "taps_oan.views.add_carrier", "line_number": 16, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "taps_oan.views.register", "line_number": 18, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "taps_oan.views.user_login", "line_number": 20, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "taps_oan.views.user_logout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": 
"taps_oan.views.account", "line_number": 24, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "taps_oan.views.yelpLookUp", "line_number": 26, "usage_type": "attribute"}, {"api_name": "taps_oan.views", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "319954014", "text": "from flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom utils.image_diff import ImageDiff\nfrom utils.image_merge import Stitcher\nfrom utils.image_similar import HashSimilar\nfrom utils.image_text import get_text_roi\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/vision/diff', methods=[\"POST\"])\ndef vision_diff():\n data = {\n \"code\": 0,\n \"data\": ImageDiff().get_image_score(request.json['image1'], request.json['image2'],\n request.json['image_diff_name'])\n }\n return jsonify(data)\n\n\n@app.route('/vision/merge', methods=[\"POST\"])\ndef vision_merge():\n data = {\n \"code\": 0,\n \"data\": Stitcher(request.json['image_list']).image_merge(request.json['name'])\n }\n return jsonify(data)\n\n\n@app.route('/vision/similar', methods=[\"POST\"])\ndef vision_similar():\n data = {\n \"code\": 0,\n \"data\": HashSimilar().get_hash_similar(request.json['image1'], request.json['image2'])\n }\n return jsonify(data)\n\n\n@app.route('/vision/text', methods=[\"POST\"])\ndef vision_text():\n data = {\n \"code\": 0,\n \"data\": get_text_roi(request.json['image'])\n }\n return jsonify(data)\n\n\n@app.errorhandler(Exception)\ndef error(e):\n ret = dict()\n ret[\"code\"] = 1\n ret[\"data\"] = repr(e)\n return jsonify(ret)\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=9092)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.image_diff.ImageDiff", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.image_merge.Stitcher", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.image_similar.HashSimilar", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.image_text.get_text_roi", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "148894343", "text": "import pandas\nimport logging\n\nfrom osgeo import ogr\n\n\nlogging.basicConfig()\n\n\ndef load_polygons():\n driver = ogr.GetDriverByName('ESRI Shapefile')\n polyshp = driver.Open('gis/shp/neighborhoods.shp')\n polygon_layer = polyshp.GetLayer(0)\n return list(polygon_layer)\n\n\ndef get_point(x, y):\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x * 10000, y * 10000)\n return point\n\n\ndef classify_point(point, polygons):\n for polygon in polygons:\n if polygon.GetGeometryRef().Contains(point):\n return polygon.GetField('nhood')\n logging.info(\"No polygon found for {}\".format(str(point)))\n return \"Unknown\"\n\n\ndef classify_df(df):\n polygons = load_polygons()\n coords = zip(df.x.tolist(), df.y.tolist())\n df['classified_neighborhood'] = pandas.Series([classify_point(get_point(*coord), polygons) for coord in coords])\n", "sub_path": "data/geo_classifier.py", "file_name": "geo_classifier.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "osgeo.ogr.GetDriverByName", "line_number": 11, "usage_type": "call"}, {"api_name": "osgeo.ogr", "line_number": 11, "usage_type": "name"}, {"api_name": "osgeo.ogr.Geometry", "line_number": 18, "usage_type": "call"}, {"api_name": "osgeo.ogr", "line_number": 18, "usage_type": "name"}, {"api_name": "osgeo.ogr.wkbPoint", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "602649974", "text": "\"\"\"\nAuthors:\nDavid Saper - 302598032 dav_sap\nAlon Perelmuter - 20063088 alonperl\n\"\"\"\n\nfrom pox.core import core\nimport pox.openflow.libopenflow_01 as of\nimport time\nimport threading\nfrom pox.core import core\nimport pox.openflow.libopenflow_01 as of\nimport utils\nfrom pox.lib.packet.lldp import lldp, chassis_id, port_id, ttl, end_tlv\nfrom pox.lib.packet.ethernet import ethernet\nlog = core.getLogger()\ntutorial_list = []\n\nclass Tutorial (object):\n \"\"\"\n A Tutorial object is created for each switch that connects.\n A Connection object for that switch is passed to the __init__ function.\n \"\"\"\n\n def __init__ (self, connection):\n self.forward_table = {}\n self.connection = connection\n self.unauthorized_ports = []\n # self.discovery = Discovery()\n # Discovery.get_node(connection.dpid).connection = connection\n # This binds our PacketIn event listener\n connection.addListeners(self)\n def update_flow_table(self):\n \"\"\"\n This function goes over all ports found in unauthorized_port (port that are\n forbidden from graph because of the spanning tree.) and ask remove_rules_by_port\n to remove all the flows holding the forbidden ports.\n \"\"\"\n # log.debug('update flow table for switch {}'.format(self.connection.dpid))\n for port in self.unauthorized_ports:\n self.remove_rules_by_port(port)\n\n def _handle_PacketIn (self, event):\n \"\"\"\n Handles packet in messages from the switch.\n \"\"\"\n if event.parsed.type == ethernet.LLDP_TYPE:\n return\n packet = event.parsed # Packet is the original L2 packet sent by the switch\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n # Ignore IPv6 discovery messages\n if \"33:33:00:00:00:\" in str(packet.dst):\n return\n\n packet_in = event.ofp # packet_in is the OpenFlow packet sent by the switch\n\n self.act_like_switch(packet, packet_in)\n\n def send_packet (self, buffer_id, raw_data, out_port, in_port):\n \"\"\"\n Sends a packet out of the specified switch port.\n If buffer_id is a valid buffer on the switch, use that. Otherwise,\n send the raw data in raw_data.\n The \"in_port\" is the port number that packet arrived on. 
Use\n OFPP_NONE if you're generating this packet.\n \"\"\"\n # We tell the switch to take the packet with id buffer_if from in_port\n # and send it to out_port\n # If the switch did not specify a buffer_id, it must have specified\n # the raw data of the packet, so in this case we tell it to send\n # the raw data\n msg = of.ofp_packet_out()\n msg.in_port = in_port\n if buffer_id != -1 and buffer_id is not None:\n # We got a buffer ID from the switch; use that\n msg.buffer_id = buffer_id\n else:\n # No buffer ID from switch -- we got the raw data\n if raw_data is None:\n # No raw_data specified -- nothing to send!\n return\n msg.data = raw_data\n\n # Add an action to send to the specified port\n action = of.ofp_action_output(port = out_port)\n msg.actions.append(action)\n\n # Send message to switch\n self.connection.send(msg)\n\n def send_flow_mod(self, packet, packet_in, out_port):\n \"\"\"\n This function install a flow for a specific request to the switch\n \"\"\"\n log.debug(\"Installing new flow rule on SW: {}; in_port: {}; dl_src: {}; dl_dst: {}\".format(self.connection.dpid, packet_in.in_port, packet.src,packet.dst))\n fm = of.ofp_flow_mod()\n fm.match.in_port = packet_in.in_port\n fm.match.dl_dst = packet.dst\n fm.match.dl_src = packet.src\n # it is not mandatory to set fm.data or fm.buffer_id\n if packet_in.buffer_id != -1 and packet_in.buffer_id is not None:\n # Valid buffer ID was sent from switch, we do not need to encapsulate raw data in response\n fm.buffer_id = packet_in.buffer_id\n else:\n if packet_in.data is not None:\n # No valid buffer ID was sent but raw data exists, send raw data with flow_mod\n fm.data = packet_in.data\n else:\n return\n action = of.ofp_action_output(port=out_port)\n fm.actions.append(action)\n\n # Send message to switch\n self.connection.send(fm)\n\n\n\n\n def act_like_switch(self, packet, packet_in):\n \"\"\"\n This function manage the forwarding table of the switch.\n the func. 
gets the packet the the switch passed to the controller,\n and decide whether to ignore the packet or install a new flow to the switch,\n or just flood the packet.\n \"\"\"\n if packet_in.in_port in self.unauthorized_ports:\n log.debug(\"SW \" + str(self.connection.dpid) + \" got packet from unauthorized port \" + str(packet_in.in_port))\n return\n if packet.src in self.forward_table and packet_in.in_port != self.forward_table[packet.src]:\n self.remove_flow(packet.src)\n self.forward_table[packet.src] = packet_in.in_port\n if packet.dst in self.forward_table:\n self.send_flow_mod(packet, packet_in, self.forward_table[packet.dst])\n else:\n ####FLOODING packet\n ports_list = self.connection.features.ports\n log.debug('SW:' + str(\n self.connection.dpid) + '; Flooding packet: dest = {}; src = {}; from in_port = {}; to all ports except '\n 'unauthorized = {}'.format(packet.dst, packet.src, packet_in.in_port, self.unauthorized_ports))\n for port in ports_list:\n if port.port_no not in self.unauthorized_ports and \\\n port.port_no < of.OFPP_MAX and \\\n port.port_no != packet_in.in_port:\n self.send_packet(None, packet_in.data, port.port_no, packet_in.in_port)\n\n def remove_flow(self, source):\n \"\"\"\n This function removes a flow from the switch by source mac address.\n It helps while links are turn off\n \"\"\"\n log.debug('Remove flow from SW: {} ; dl_dest = {}'.format(self.connection.dpid, source))\n fm = of.ofp_flow_mod()\n fm.command = of.OFPFC_DELETE\n # fm.match.dl_dst = source # change this if necessary\n fm.match.dl_dst = source # change this if necessary\n self.connection.send(fm) # send flow-mod message\n\n def remove_rules_by_port(self, port):\n \"\"\"\n This function removes a flow from the switch according to a given port number.\n It helps while removing edged in the graph.\n it will clean all the flows in the switch that connected to the port\n \"\"\"\n log.debug(\"Remove flow from SW: {} out_port:{}\".format(self.connection.dpid,port))\n msg = of.ofp_flow_mod(command=of.OFPFC_DELETE, out_port=port)\n self.connection.send(msg)\n mac_to_remove = []\n for mac, in_port in self.forward_table.iteritems():\n if in_port == port:\n mac_to_remove.append(mac)\n fm = of.ofp_flow_mod()\n fm.command = of.OFPFC_DELETE\n fm.match.dl_src = mac\n self.connection.send(fm)\n for mac in mac_to_remove:\n del self.forward_table[mac]\n\n\nclass Discovery(object):\n __metaclass__ = utils.SingletonType\n LLDP_INTERVAL = 1\n TIME_TO_REMOVE = 6\n LLDP_DST_ADDR = '\\x01\\x80\\xc2\\x00\\x00\\x0e'\n def __init__(self):\n core.openflow.addListeners(self)\n self.topology = utils.Graph()\n self.edge_timer = utils.Timer(3,self.run_edges,recurring=True)\n self.lock = threading.Lock()\n self.sub_tree = []\n\n\n\n def is_port_active(self, node, port):\n \"\"\"\"\n This function gets a Node and a port number, It will go over the sub_tree,\n (sub_tree is the kruskal minimum spanning tree) and will return if the given\n port is a forbidden port.\n :return:\n True : if link is active\n False: if link is forbidden\n \"\"\"\n for edge in self.sub_tree:\n if node in edge:\n if self.topology.nodes[node][port][0] in edge:\n return True\n return False\n def _handle_ConnectionUp(self, event):\n \"\"\"\"\n Will be called when a switch is added. Use event.dpid for switch ID,\n and event.connection.send(...) 
to send messages to the switch.\n \"\"\"\n timer = utils.Timer(Discovery.LLDP_INTERVAL,self._send_lldp,args=[event],recurring=True)\n log.debug(\"New switch ConnectionUp dpid: {}\".format(event.dpid))\n self.lock.acquire()\n node = utils.Node(event.dpid)\n self.set_tutorial(node, event.connection)\n\n self.topology.add_node(node, {})\n self.lock.release()\n #send flow to the switch to pass every lldp packet to the controller\n fm = of.ofp_flow_mod()\n fm.match.dl_type = ethernet.LLDP_TYPE\n fm.match.dl_dst = self.LLDP_DST_ADDR\n # it is not mandatory to set fm.data or fm.buffer_id\n action = of.ofp_action_output(port=of.OFPP_CONTROLLER)\n fm.actions.append(action)\n # Send flow to the switch\n event.connection.send(fm)\n\n @staticmethod\n def set_tutorial(node, connection):\n \"\"\"\"\n connect given node to his real Tutorial.\n \"\"\"\n for tuto in tutorial_list:\n if tuto.connection == connection:\n node.tutorial = tuto\n return True\n return False\n def _handle_ConnectionDown(self, event):\n \"\"\"\"\n Will be called when a switch goes down. Use event.dpid for switch ID.\n \"\"\"\n log.debug(\"_handle_ConnectionDown: dpid {}\".format(event.dpid))\n self.lock.acquire()\n node = self.get_node(event.dpid)\n far_ends = []\n for port, port_data in self.topology.nodes[node].iteritems():\n far_ends.append(port_data[0])\n for far in far_ends:\n self.remove_edge((node, far))\n self.topology.remove_node(node)\n self.Kruskal_Mst()\n self.lock.release()\n\n def _handle_PortStatus(self, event):\n \"\"\"\"\n Will be called when a link changes. Specifically, when event.ofp.desc.config is 1,\n it means that the link is down. Use event.dpid for switch ID and event.port for port number.\n \"\"\"\n log.debug(\"_handle_PortStatus: SW {} port{}; status {}\".format(event.dpid, event.port, event.ofp.desc.config))\n if event.ofp.desc.config == 1:\n #port is down\n self.lock.acquire()\n node = self.get_node(event.dpid)\n if event.port in self.topology.nodes[node]:\n far_node = self.topology.nodes[node][event.port][0]\n edge = (node, far_node)\n self.remove_edge(edge)\n log.debug(\"Removed edge (sw{})<>(sw{}); Reason: ports are down\".format(str(node),str(far_node)))\n # log.debug(str(far_node) +self.ports_dict_to_string(self.topology.nodes[far_node]))\n self.Kruskal_Mst()\n # else:\n # log.debug(\"Trying to remove a not active edge : Switch {} port{}\".format(event.dpid, event.port))\n self.lock.release()\n\n\n\n\n def _handle_PacketIn(self, event):\n \"\"\"\"\n Will be called when a packet is sent to the controller. 
Same as in the previous part.\n Use it to find LLDP packets (event.parsed.type == ethernet.LLDP_TYPE) and update\n the topology according to them.\n \"\"\"\n if event.parsed.type != ethernet.LLDP_TYPE:\n return\n\n pkt = event.parsed\n lldp_p = pkt.payload\n ch_id = lldp_p.tlvs[0]\n po_id = lldp_p.tlvs[1]\n\n r_dpid = int(ch_id.id)\n r_port = int(po_id.id)\n # log.debug(\"Discovery _handle_PacketIn to dpid {} from Sw{}port{}\".format(event.dpid, r_dpid, r_port))\n self.lock.acquire()\n node = self.get_node(event.dpid)\n far_node = self.get_node(r_dpid)\n if self.topology.get_edge(node, far_node):\n self.topology.update_edge(node, far_node, time.time())\n else:\n log.debug(\"Discovered new edge: (sw: \" + str(node) + \"; port: \" + str(event.port) + \") <> (sw: \" + str(\n r_dpid) + \"; port: \" + str(r_port) + \")\")\n self.topology.add_edge(node,far_node,time.time())\n self.topology.nodes[node][event.port] = (far_node,r_port)\n self.topology.nodes[far_node][r_port] = (node, event.port)\n self.Kruskal_Mst()\n self.lock.release()\n\n def ports_dict_to_string(self,ports):\n \"\"\"\"\n This function gets a dictionary of ports and return a string of all the nodes.\n raised for log reasons.\n \"\"\"\n str_ports = ''\n for port,far in ports.iteritems():\n str_ports += \"p:\" + str(port) + \" far_node:\"+str(far[0]) + \" far_port:\"+str(far[1])\n return str_ports\n\n def _send_lldp(self, event ):\n \"\"\"\"\n \"\"\"\"\"\n # log.debug('Flooding packet : dest = {} src = {} in_port = {}'.format(packet.dst, packet.src, packet_in.in_port))\n # self.send_packet(packet_in.buffer_id, packet_in.data, of.OFPP_FLOOD, packet_in.in_port)\n # log.debug(\"send lldp sw : {}\".format(event.dpid))\n dst = Discovery.LLDP_DST_ADDR\t\t# == '\\x01\\x80\\xc2\\x00\\x00\\x0e'\n\n for p in event.ofp.ports:\n if p.port_no < of.OFPP_MAX:\n # Build LLDP packet\n src = str(p.hw_addr)\n port = p.port_no\n\n lldp_p = lldp() # create LLDP payload\n ch_id = chassis_id() # Add switch ID part\n ch_id.subtype = 1\n ch_id.id = str(event.dpid)\n lldp_p.add_tlv(ch_id)\n po_id = port_id() # Add port ID part\n po_id.subtype = 2\n po_id.id = str(port)\n lldp_p.add_tlv(po_id)\n tt = ttl() # Add TTL\n tt.ttl = Discovery.LLDP_INTERVAL # == 1\n lldp_p.add_tlv(tt)\n lldp_p.add_tlv(end_tlv())\n\n ether = ethernet() # Create an Ethernet packet\n ether.type = ethernet.LLDP_TYPE # Set its type to LLDP\n ether.src = src # Set src, dst\n ether.dst = dst\n ether.payload = lldp_p # Set payload to be the LLDP payload\n\n # send LLDP packet\n pkt = of.ofp_packet_out(action = of.ofp_action_output(port = port))\n pkt.data = ether\n event.connection.send(pkt)\n def run_edges(self):\n \"\"\"\"\n scan timestamps of all edges. If an edge was not seen for more than 6 seconds, remove it from the topology.\n \"\"\"\n self.lock.acquire()\n edges_to_remove = []\n for edge,data in self.topology.edges.iteritems():\n if time.time()-data > Discovery.TIME_TO_REMOVE:\n log.debug(\"Removed edge (sw{}<>sw{}); Reason: LLDP not arrived for long time. timeout. 
: \"\n \"\".format(edge[0].dpid,edge[1].dpid) )\n edges_to_remove += [edge]\n if edges_to_remove:\n for e in edges_to_remove:\n self.remove_edge(e)\n self.Kruskal_Mst()\n\n self.lock.release()\n\n def remove_edge(self, edge):\n # del information from nodes\n port0 = -1\n port1 = -1\n log.debug(\"remove edge ({},{})\".format(edge[0],edge[1]))\n for port, port_data in self.topology.nodes[edge[0]].iteritems():\n if port_data[0] == edge[1]:\n log.debug(\"empty the ports \" + str(port) + str(port_data[0].dpid))\n port0 = port\n port1 = port_data[1]\n del self.topology.nodes[edge[0]][port0]\n edge[0].tutorial.remove_rules_by_port(port0)\n del self.topology.nodes[edge[1]][port1]\n edge[1].tutorial.remove_rules_by_port(port1)\n # remove edge\n self.topology.delete_edge(edge[0], edge[1])\n def get_node(self, dpid):\n for node in self.topology.nodes:\n if node.dpid == dpid:\n return node\n\n def Kruskal_Mst(self):\n \"\"\"\"\n This function calculate the minimum spanning tree by kruskal algorithm\n it will update the self.sub_tree with his decision.\n It also calls for update_unauthorized_ports that will update all the nodes\n of the graph by the MST demands.\n \"\"\"\n self.sub_tree = []\n uf = utils.UnionFind()\n for v in self.topology.nodes:\n uf.make_set(v)\n for edge in self.topology.edges:\n if uf.find(edge[0]) != uf.find(edge[1]):\n self.sub_tree.append((edge[0],edge[1]))\n uf.union(edge[0],edge[1])\n log.debug(\"Kruskal full graph: {}\".format(self.edges_to_str(self.topology.edges)))\n log.debug(\"Kruskal MST: {} [these are the active links]\".format(self.edges_to_str(self.sub_tree)))\n self.update_unauthorized_ports()\n\n def edges_to_str(self,edges):\n \"\"\"\"\n :return: string of all the edges\n \"\"\"\n str_to_print = ''\n for edge in edges:\n str_to_print += '(' + str(edge[0]) + \",\" + str(edge[1]) + \") \"\n return str_to_print\n\n def update_unauthorized_ports(self):\n \"\"\"\"\n This Function will go over all the nodes and will update all the unauthorized ports\n it will call updae_flow_table of each node that will update his flow table.\n \"\"\"\n for node, ports in self.topology.nodes.iteritems():\n node.tutorial.unauthorized_ports = []\n for port in ports:\n if not self.is_port_active(node, port):\n node.tutorial.unauthorized_ports.append(port)\n log.debug(\"sw: \" + str(node) + \"; unauthorized ports by ST are: \" + str(node.tutorial.unauthorized_ports))\n node.tutorial.update_flow_table()\n\ndef launch ():\n \"\"\"\n Starts the component\n \"\"\"\n def start_switch (event):\n log.debug(\"Controlling %s\" % (event.connection,))\n t = Tutorial(event.connection)\n tutorial_list.append(t)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)\n core.register('discovery', Discovery())\n\n", "sub_path": "of_learning_switch_spanning_tree.py", "file_name": "of_learning_switch_spanning_tree.py", "file_ext": "py", "file_size_in_byte": 18488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pox.core.core.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "pox.core.core", "line_number": 16, "usage_type": "name"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 47, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_packet_out", "line_number": 75, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 75, "usage_type": 
"name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 88, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 88, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 99, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 99, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 113, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 113, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPP_MAX", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 145, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 155, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 155, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPFC_DELETE", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 156, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 168, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 168, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPFC_DELETE", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 174, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 174, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPFC_DELETE", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.SingletonType", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pox.core.core.openflow.addListeners", "line_number": 188, "usage_type": "call"}, {"api_name": "pox.core.core.openflow", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pox.core.core", "line_number": 188, "usage_type": "name"}, {"api_name": "utils.Graph", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.Timer", "line_number": 190, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 191, "usage_type": "call"}, {"api_name": "utils.Timer", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.Node", "line_number": 218, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01.ofp_flow_mod", "line_number": 224, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 224, "usage_type": "name"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 225, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 228, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 228, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.OFPP_CONTROLLER", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 289, "usage_type": "name"}, {"api_name": "time.time", "line_number": 304, "usage_type": "call"}, {"api_name": "time.time", "line_number": 308, "usage_type": "call"}, {"api_name": 
"pox.openflow.libopenflow_01.OFPP_MAX", "line_number": 333, "usage_type": "attribute"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 333, "usage_type": "name"}, {"api_name": "pox.lib.packet.lldp.lldp", "line_number": 338, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.chassis_id", "line_number": 339, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.port_id", "line_number": 343, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.ttl", "line_number": 347, "usage_type": "call"}, {"api_name": "pox.lib.packet.lldp.end_tlv", "line_number": 350, "usage_type": "call"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 352, "usage_type": "call"}, {"api_name": "pox.lib.packet.ethernet.ethernet.LLDP_TYPE", "line_number": 353, "usage_type": "attribute"}, {"api_name": "pox.lib.packet.ethernet.ethernet", "line_number": 353, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_packet_out", "line_number": 359, "usage_type": "call"}, {"api_name": "pox.openflow.libopenflow_01", "line_number": 359, "usage_type": "name"}, {"api_name": "pox.openflow.libopenflow_01.ofp_action_output", "line_number": 359, "usage_type": "call"}, {"api_name": "time.time", "line_number": 369, "usage_type": "call"}, {"api_name": "utils.UnionFind", "line_number": 409, "usage_type": "call"}, {"api_name": "pox.core.core.openflow.addListenerByName", "line_number": 450, "usage_type": "call"}, {"api_name": "pox.core.core.openflow", "line_number": 450, "usage_type": "attribute"}, {"api_name": "pox.core.core", "line_number": 450, "usage_type": "name"}, {"api_name": "pox.core.core.register", "line_number": 451, "usage_type": "call"}, {"api_name": "pox.core.core", "line_number": 451, "usage_type": "name"}]}
+{"seq_id": "606320313", "text": "from datetime import datetime\nfrom typing import List\nfrom enum import Enum\nfrom pprint import pprint\nfrom acaisdk.utils.utils import bytes_to_size\n\n\nclass Alignment(Enum):\n LEFT = '{{:{}}}'\n RIGHT = '{{:>{}}}'\n\n\nclass PrettyPrint:\n @staticmethod\n def single_col(data: List, lexi_sort=False):\n if lexi_sort:\n data = sorted(data)\n for l in data:\n print(l)\n\n @staticmethod\n def list_with_meta(file_set, file_ids: List[str], files_meta: List,\n human_readable_size=True):\n \"\"\"\n :param file_set:\n :param human_readable_size:\n :param file_ids:\n :param files_meta: expects a list of meta dicts, one dict per file\n :return:\n \"\"\"\n sorted_file_ids = PrettyPrint.sort_by_type(file_ids)\n id_to_meta = {d['_id']: d for d in files_meta} # type: dict\n\n # Columns: FilePath:Version, size, createdBy, createdAt\n cols = [\n ['[{}]'.format(file_set) if file_set else '[/]',\n 'size',\n 'user',\n 'created']\n ]\n align = [Alignment.LEFT,\n Alignment.RIGHT,\n Alignment.RIGHT,\n Alignment.RIGHT]\n\n # Maybe some file_ids does not have meta\n for fid in sorted_file_ids:\n if fid in id_to_meta:\n size = str(id_to_meta[fid]['__size__'])\n if human_readable_size:\n size = bytes_to_size(int(size))\n uid = str(id_to_meta[fid]['__creator_id__'])\n created_at = id_to_meta[fid]['__create_time__'] // 1000\n ts = datetime \\\n .utcfromtimestamp(created_at) \\\n .strftime('%Y-%m-%d %H:%M:%S')\n cols.append([fid, size, uid, ts])\n else:\n cols.append([fid, '-', '-', '-'])\n\n PrettyPrint.aligned_print(cols, align)\n\n @staticmethod\n def aligned_print(rows: List[List[str]],\n alignment: List[Alignment]):\n # Loop and set max column width\n max_col_width = [0, 0, 0, 0]\n for row in rows:\n for i, c in enumerate(row):\n max_col_width[i] = max(max_col_width[i], len(c))\n # Print\n template = ' '.join([alignment[i].value.format(w)\n for i, w in enumerate(max_col_width)])\n for r in rows:\n print(template.format(*r))\n\n @staticmethod\n def job(j):\n print('Registered job id: {}'.format(j.id))\n pprint(dict(j.dict))\n\n @staticmethod\n def sort_by_type(file_ids):\n dirs = [f for f in file_ids if f.endswith('/')]\n files = [f for f in file_ids if not f.endswith('/')]\n return sorted(dirs) + sorted(files)\n\n @staticmethod\n def print(content):\n pprint(content)\n", "sub_path": "acaicli/prettyprint.py", "file_name": "prettyprint.py", "file_ext": "py", "file_size_in_byte": 2855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "enum.Enum", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "acaisdk.utils.utils.bytes_to_size", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "pprint.pprint", "line_number": 80, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "243876995", "text": "import tablib\n\n\ndefault_config = {}\n\n\nclass ScrubError(Exception):\n pass\n\n\nclass Report(tablib.Dataset):\n\n def __init__(self, headers, *args, **kwargs):\n\n self.config = kwargs.pop('config', default_config.copy())\n # we don't want to set self.headers until our dataset is ready\n self._headers = headers\n self.obj_list = args\n\n dataset_data = list(self.obj_data_generator())\n self.tablib_init(*dataset_data, headers=self._headers, **kwargs)\n\n @classmethod\n def register_new_formats(cls, formats):\n for fmt in formats:\n def _import(self):\n return fmt.import_set(self)\n\n def export(self):\n return fmt.export_set(self)\n\n setattr(cls, fmt.title, property(export, _import))\n\n def tablib_init(self, *args, **kwargs):\n return super(Report, self).__init__(*args, **kwargs)\n\n def default_scrub(self, obj, header):\n\n if hasattr(obj, 'keys'):\n try:\n return obj[header]\n except KeyError:\n if self.config.get('raise_on_scrub_failure', False):\n raise ScrubError(\"mapping object has no value for `%s`\" \\\n % header)\n else:\n return None\n\n try:\n scrubbed_value = getattr(obj, header)\n except AttributeError:\n if self.config.get('raise_on_scrub_failure', False):\n raise ScrubError(\"obj has no attribute %s\" % header)\n else:\n return None\n\n return scrubbed_value\n\n def scrub(self, obj):\n \"\"\"Turn report_data into a dict of cleaned data\"\"\"\n return {}\n\n def obj_data_generator(self):\n \"\"\"Turn Report data into a proper tablib Dataset\n\n #) Get list of headers that scrub will handle\n #) for other fields, attempt default_scrub method\n #) by default, if default_scrub fails, use None. However\n if self.config['raise_on_scrub_failure'] is True, ValueError will raise\n instead\n \"\"\"\n\n for obj in self.obj_list:\n scrubbed_data = self.scrub(obj)\n\n def get_value(header, obj):\n\n # we can't use dict.get's default parameter as it will run\n # default_scrub even if the value is in the scrub dict\n try:\n value = scrubbed_data[header]\n except KeyError:\n value = self.default_scrub(obj, header)\n return value\n\n values = [\n get_value(header, obj)\n for header in self._headers\n ]\n\n yield values\n", "sub_path": "bly/reports.py", "file_name": "reports.py", "file_ext": "py", "file_size_in_byte": 2729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tablib.Dataset", "line_number": 11, "usage_type": "attribute"}]}
+{"seq_id": "585620974", "text": "from collections import defaultdict\nfrom typing import List, OrderedDict\n\n\nclass Solution:\n def fullBloomFlowers(self, flowers: List[List[int]], persons: List[int]) -> List[int]:\n states = defaultdict(int)\n for flower in flowers:\n start, end = flower\n end = end + 1\n if start not in states:\n states[start] = 1\n else:\n states[start] += 1\n if end not in states:\n states[end] = -1\n else:\n states[end] -= 1\n states = list(states.items())\n states.sort(key=lambda x: x[0])\n for i in range(1, len(states)):\n states[i] = (states[i][0], states[i][1] + states[i - 1][1])\n print(states)\n res = []\n print(states)\n from bisect import bisect\n for time in persons:\n pos = bisect(states, time, key=lambda x:x[0])\n if pos time:\n res.append(states[pos-1][1])\n else:\n res.append(states[pos][1])\n else:\n res.append(0)\n return res\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.fullBloomFlowers([[36,39],[29,49],[32,35],[14,43],[42,49],[48,48],[32,46],[6,41],[14,19]],\n[14,4]))", "sub_path": "2251.py", "file_name": "2251.py", "file_ext": "py", "file_size_in_byte": 1364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "bisect.bisect", "line_number": 28, "usage_type": "call"}, {"api_name": "{'bisect': 'bisect.bisect'}", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "219494275", "text": "from datetime import date, timedelta\n\nimport pytest\nfrom pytest import fixture\nfrom utilities.models.data_models import (\n Service,\n set_empty_list,\n set_service_regions,\n set_service_vehicle,\n)\n\n\n@pytest.mark.model\n@pytest.mark.unit\nclass TestService:\n \"\"\"Battery of tests for Service data model functionality.\"\"\"\n\n @pytest.fixture\n def valid_service(self) -> Service:\n \"\"\"Create a valid Service.\"\"\"\n service: Service = Service(name='Testing Service')\n\n yield service\n\n @pytest.mark.low\n def test_build__override_default_values(self, valid_service: fixture) -> None:\n \"\"\"Check that default values may be overridden post-build.\"\"\"\n service: Service = valid_service\n service.recurring_rides_enabled = True\n\n assert service.recurring_rides_enabled is True\n\n @pytest.mark.low\n def test_build__requires_name_param(self) -> None:\n \"\"\"Attempt to build a new service without entering params.\"\"\"\n with pytest.raises(TypeError) as e:\n Service() # type: ignore\n assert \"required positional argument: 'name'\" in str(e.value)\n\n @pytest.mark.low\n def test_build__set_default_values(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets default values.\"\"\"\n service: Service = valid_service\n\n assert (\n service.friday is True\n and service.monday is True\n and service.saturday is True\n and service.sunday is True\n and service.thursday is True\n and service.tuesday is True\n and service.wednesday is True\n and service.color == '1e88e5'\n and service.end_time == 86340\n and service.fare_required is False\n and service.in_advance_enabled is False\n and service.managed_mode is False\n and service.max_capacity == 10\n and service.on_demand_enabled is True\n and service.recurring_rides_enabled is False\n and service.rider_restricted is False\n and service.shibboleth_restricted is False\n and service.start_time == 0\n and service.stop_restriction == 'unrestricted'\n and service.wheelchair_accessible is True\n )\n\n @pytest.mark.low\n def test_build__set_empty_fields(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets empty lists.\"\"\"\n service: Service = valid_service\n\n assert service.addresses == set_empty_list() and service.exceptions == set_empty_list()\n\n @pytest.mark.low\n def test_build__set_end_date(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets an end date 10 days in the future.\"\"\"\n service: Service = valid_service\n\n assert date.isoformat(date.today() + timedelta(days=10)) in service.end_date\n\n @pytest.mark.low\n def test_build__set_none_values(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets None values.\"\"\"\n service: Service = valid_service\n\n assert (\n service.service_id is None\n and service.fare_price is None\n and service.max_schedule_time is None\n and service.shibboleth_affiliation is None\n and service.token_transit_fare_id is None\n )\n\n @pytest.mark.low\n def test_build__set_regions(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets a region.\"\"\"\n service: Service = valid_service\n\n assert service.regions == set_service_regions()\n\n @pytest.mark.low\n def test_build__set_start_date(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets a start date 1 day in the past.\"\"\"\n service: Service = valid_service\n\n assert date.isoformat(date.today() - timedelta(days=1)) in service.start_date\n\n 
@pytest.mark.low\n def test_build__set_vehicles(self, valid_service: fixture) -> None:\n \"\"\"Check that building a service sets a vehicle.\"\"\"\n service: Service = valid_service\n\n assert service.vehicles == set_service_vehicle()\n\n @pytest.mark.low\n def test_build__valid_input(self, valid_service: fixture) -> None:\n \"\"\"Build a service with valid input.\"\"\"\n service: Service = valid_service\n\n assert service.name == 'Testing Service'\n\n @pytest.mark.low\n def test_model__override_default_values(self) -> None:\n \"\"\"Check that default values may be overridden prior to build.\"\"\"\n service: Service = Service(name='Testing Service', recurring_rides_enabled=True)\n\n assert service.recurring_rides_enabled is True\n", "sub_path": "integration/models/test_service.py", "file_name": "test_service.py", "file_ext": "py", "file_size_in_byte": 4689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "utilities.models.data_models.Service", "line_number": 21, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "attribute"}, {"api_name": "utilities.models.data_models.Service", "line_number": 19, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 28, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 36, "usage_type": "call"}, {"api_name": "utilities.models.data_models.Service", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 41, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 43, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 69, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 71, "usage_type": "name"}, {"api_name": "utilities.models.data_models.set_empty_list", "line_number": 73, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 76, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 78, "usage_type": "name"}, {"api_name": "datetime.date.isoformat", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 83, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 85, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 96, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 98, "usage_type": "name"}, {"api_name": "utilities.models.data_models.set_service_regions", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 103, "usage_type": "name"}, {"api_name": 
"utilities.models.data_models.Service", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.date.isoformat", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 110, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 112, "usage_type": "name"}, {"api_name": "utilities.models.data_models.set_service_vehicle", "line_number": 114, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 117, "usage_type": "name"}, {"api_name": "utilities.models.data_models.Service", "line_number": 119, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 116, "usage_type": "attribute"}, {"api_name": "utilities.models.data_models.Service", "line_number": 126, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 14, "usage_type": "attribute"}]}
+{"seq_id": "520166508", "text": "# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tempfile\nimport os\n\nimport tensorflow as tf\nimport numpy as np\nimport datetime\n\nfrom tensorflow import keras\n\n# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 to 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\ntrain_images = tf.expand_dims(train_images, -1)\ntest_images = tf.expand_dims(test_images, -1)\n\nfrom tensorflow_model_optimization.quantization.keras.vitis.layers import vitis_activation\n\n# Define the model architecture.\ninputs = keras.layers.Input(shape=(28, 28, 1))\nx = keras.layers.Conv2D(\n filters=32, kernel_size=(3, 3), use_bias=True, activation='linear')(\n inputs)\nx = keras.layers.BatchNormalization()(x)\nx = keras.layers.Activation('relu')(x)\nx = keras.layers.DepthwiseConv2D(\n kernel_size=(3, 3), use_bias=True, activation='linear')(\n x)\nx = keras.layers.BatchNormalization()(x)\nx = keras.layers.Activation('gelu')(x)\nx = keras.layers.Flatten()(x)\nx = keras.layers.Dropout(rate=0.1)(x)\nx = keras.layers.Dense(10)(x)\npredictions = x\nmodel = keras.Model(inputs=inputs, outputs=predictions, name=\"mnist_model\")\n\n# Train the float model\nmodel.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['sparse_categorical_accuracy'])\n\nlog_dir = \"logs/float_fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, histogram_freq=1)\nmodel.fit(\n train_images,\n train_labels,\n epochs=1,\n validation_data=(test_images, test_labels))\n\nmodel.save('float.h5')\nmodel.evaluate(test_images, test_labels, batch_size=500)\n\n# Inspect the float model\nfrom tensorflow_model_optimization.quantization.keras import vitis_inspect\n\n# `target` is the target DPU to deploy this model, it can be a name(e.g. \"DPUCZDX8G_ISA1_B4096\"),\n# a json(e.g. \"./U50/arch.json\") or a fingerprint.\ninspector = vitis_inspect.VitisInspector(target='DPUCZDX8G_ISA1_B4096')\n\n# In this model only `gelu` layer is not supported by DPU target.\n# Inspect results will be shown on screen, and more detailed results will be saved in\n# 'inspect_results.txt'. 
We can also visualize the results in 'model.svg'.\ninspector.inspect_model(\n model,\n input_shape=[1, 28, 28, 1],\n dump_model=True,\n dump_model_file='inspect_model.h5',\n plot=True,\n plot_file='model.svg',\n dump_results=True,\n dump_results_file='inspect_results.txt',\n verbose=0)\n", "sub_path": "examples/vai_quantizer/tensorflow2x/mnist_cnn_inspect.py", "file_name": "mnist_cnn_inspect.py", "file_ext": "py", "file_size_in_byte": 3233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.keras.datasets", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 25, "usage_type": "name"}, {"api_name": "tensorflow.expand_dims", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 37, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 38, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 42, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.DepthwiseConv2D", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 47, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 48, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 49, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.keras.Model", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.keras", 
"line_number": 52, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 57, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.TensorBoard", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_inspect.VitisInspector", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow_model_optimization.quantization.keras.vitis_inspect", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "624795403", "text": "import facepy\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom . facepy_wrapper import utils\n\nGRAPH_MAX_TRIES = 3\nFACEBOOK_TIMEOUT = getattr(settings, 'FACEBOOK_AUTH_BACKEND_FACEBOOK_TIMEOUT',\n timezone.timedelta(seconds=20).total_seconds())\nFACEBOOK_API_VERSION = getattr(settings, 'FACEBOOK_API_VERSION', '2.1')\n\n\ndef get_from_graph_api(graphAPI, query):\n for i in range(GRAPH_MAX_TRIES):\n try:\n return graphAPI.get(query)\n except facepy.FacepyError as e:\n if i == GRAPH_MAX_TRIES - 1 or getattr(e, 'code', None) != 1:\n raise\n\n\ndef get_application_graph(version=None):\n version = version or FACEBOOK_API_VERSION\n token = (facepy.utils\n .get_application_access_token(settings.FACEBOOK_APP_ID,\n settings.FACEBOOK_APP_SECRET,\n api_version=version))\n return get_graph(token)\n\n\ndef get_graph(*args, **kwargs):\n version = FACEBOOK_API_VERSION\n return utils.get_graph(*args, version=version, timeout=FACEBOOK_TIMEOUT, **kwargs)\n\n\ndef get_long_lived_access_token(access_token):\n return utils.get_long_lived_access_token(\n access_token=access_token,\n client_id=settings.FACEBOOK_APP_ID,\n client_secret=settings.FACEBOOK_APP_SECRET,\n )\n\n\ndef get_access_token(code=None, redirect_uri=None):\n return utils.get_access_token(\n code=code,\n redirect_uri=redirect_uri,\n client_id=settings.FACEBOOK_APP_ID,\n client_secret=settings.FACEBOOK_APP_SECRET,\n timeout=FACEBOOK_TIMEOUT,\n )\n", "sub_path": "facebook_auth/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.conf.settings", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 9, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 10, "usage_type": "argument"}, {"api_name": "facepy.FacepyError", "line_number": 17, "usage_type": "attribute"}, {"api_name": "facepy.utils.get_application_access_token", "line_number": 24, "usage_type": "call"}, {"api_name": "facepy.utils", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings.FACEBOOK_APP_ID", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_SECRET", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 26, "usage_type": "name"}, {"api_name": "facepy_wrapper.utils.get_graph", "line_number": 33, "usage_type": "call"}, {"api_name": "facepy_wrapper.utils", "line_number": 33, "usage_type": "name"}, {"api_name": "facepy_wrapper.utils.get_long_lived_access_token", "line_number": 37, "usage_type": "call"}, {"api_name": "facepy_wrapper.utils", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_ID", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_SECRET", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "facepy_wrapper.utils.get_access_token", "line_number": 45, "usage_type": "call"}, {"api_name": "facepy_wrapper.utils", "line_number": 45, "usage_type": "name"}, {"api_name": 
"django.conf.settings.FACEBOOK_APP_ID", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "django.conf.settings.FACEBOOK_APP_SECRET", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "434905892", "text": "from collections import OrderedDict\nfrom dataclasses import dataclass, field\nfrom enum import Enum, auto\nfrom typing import Dict, List, Optional\n\nfrom sqlalchemy.sql.schema import Column, ForeignKeyConstraint, Table\n\n\n@dataclass\nclass Model:\n name: str\n table: Table\n\n @property\n def schema(self) -> str:\n return self.table.schema\n\n\n@dataclass\nclass ModelClass(Model):\n columns: Dict[str, Column] = field(default_factory=OrderedDict)\n relationships: Dict[str, 'Relationship'] = field(default_factory=OrderedDict)\n parent_class: Optional['ModelClass'] = None\n children: List['ModelClass'] = field(default_factory=list)\n\n\nclass RelationshipType(Enum):\n ONE_TO_ONE = auto()\n ONE_TO_MANY = auto()\n MANY_TO_ONE = auto()\n MANY_TO_MANY = auto()\n\n\n@dataclass\nclass Relationship:\n type: RelationshipType\n source: ModelClass\n target: ModelClass\n constraint: Optional[ForeignKeyConstraint] = None\n association_table: Optional[Model] = None\n backref: Optional[str] = None\n remote_side: List[str] = field(default_factory=list)\n foreign_keys: List[str] = field(default_factory=list)\n primaryjoin: Optional[str] = None\n secondaryjoin: Optional[str] = None\n", "sub_path": "src/sqlacodegen/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sqlalchemy.sql.schema.Table", "line_number": 12, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.schema.Column", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 22, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 24, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 19, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 27, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 28, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 29, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 30, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.schema.ForeignKeyConstraint", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 42, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 45, "usage_type": 
"name"}, {"api_name": "dataclasses.dataclass", "line_number": 34, "usage_type": "name"}]}
+{"seq_id": "174577214", "text": "from collections.abc import Iterator, Iterable\nfrom typing import Any, List\n\n\nclass MyIterator(Iterator):\n def __init__(self, collection: List[Any]) -> None:\n self._collection = collection\n self._index = 0\n\n def __next__(self):\n try:\n item = self._collection[self._index]\n self._index += 1\n return item\n except IndexError:\n raise StopIteration\n\n\nclass ReverseIterator(Iterator):\n def __init__(self, collection: List[Any]) -> None:\n self._collection = collection\n self._index = -1\n\n def __next__(self):\n try:\n item = self._collection[self._index]\n self._index -= 1\n return item\n except IndexError:\n raise StopIteration\n\n\nclass MyList(Iterable):\n def __init__(self) -> None:\n self._items: List[Any] = []\n self._my_iterator = MyIterator(self._items)\n\n def add(self, value: Any) -> None:\n self._items.append(value)\n\n def __iter__(self) -> Iterator:\n return self._my_iterator\n\n def reverse_iterator(self) -> Iterator:\n return ReverseIterator(self._items)\n\n def __str__(self) -> str:\n return f'{self.__class__.__name__}({self._items})'\n\n\nif __name__ == \"__main__\":\n mylist = MyList()\n mylist.add('Carlos')\n mylist.add('Eduardo')\n mylist.add('Rocha')\n\n print(mylist)\n\n\n for value in mylist:\n print(value)\n\n print()\n\n for value in mylist.reverse_iterator():\n print(value)", "sub_path": "behavioral/iterator/iterator.py", "file_name": "iterator.py", "file_ext": "py", "file_size_in_byte": 1514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.abc.Iterator", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.abc.Iterator", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 38, "usage_type": "name"}, {"api_name": "collections.abc.Iterator", "line_number": 41, "usage_type": "name"}, {"api_name": "collections.abc.Iterator", "line_number": 44, "usage_type": "name"}]}
+{"seq_id": "445222313", "text": "import pygame,time, math, random, sys,os,random\r\nfrom pygame.locals import *\r\nimport RPi.GPIO as GPIO\r\nfrom gpiozero import Button\r\n\r\n\r\nbutton_jump = Button(23)\r\nbutton_shield = Button(24)\r\n\r\ndef game_initialization():\r\n \r\n global dino1,dino2,background,background1,background2,background3,background4,background5,backgroundx\r\n global e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10\r\n global sky1,sky2\r\n global gameover,shield\r\n dino1 = pygame.image.load('Dino1.png')\r\n dino2 = pygame.image.load('Dino2.png')\r\n background = pygame.image.load('bg.png')\r\n background1 = pygame.image.load('bg.png')\r\n background2 = pygame.image.load('bg2.png')\r\n background3 = pygame.image.load('bg3.png')\r\n background4 = pygame.image.load('bg4.png')\r\n background5 = pygame.image.load('bg5.png')\r\n backgroundx = pygame.image.load('bgx.png')\r\n e0= pygame.image.load('energy_0.png')\r\n e1= pygame.image.load('energy_1.png')\r\n e2= pygame.image.load('energy_2.png')\r\n e3= pygame.image.load('energy_3.png')\r\n e4= pygame.image.load('energy_4.png')\r\n e5= pygame.image.load('energy_5.png')\r\n e6= pygame.image.load('energy_6.png')\r\n e7= pygame.image.load('energy_7.png')\r\n e8= pygame.image.load('energy_8.png')\r\n e9= pygame.image.load('energy_9.png')\r\n e10= pygame.image.load('energy_10.png')\r\n sky1 = pygame.image.load('sk1.png')\r\n sky2 = pygame.image.load('sk2.png')\r\n gameover = pygame.image.load('gameover.png')\r\n shield= pygame.image.load('shield.png')\r\n\r\n global jump_music,die_music,shield_music\r\n pygame.mixer.init()\r\n pygame.time.delay(1000)\r\n jump_music = pygame.mixer.Sound('jump.wav')\r\n jump_music.set_volume(0.2)\r\n die_music = pygame.mixer.Sound('die.wav')\r\n die_music.set_volume(1.0)\r\n shield_music = pygame.mixer.Sound('shield.wav')\r\n shield_music .set_volume(1.0)\r\n\r\n pygame.mixer.music.load(\"david_bgm.mp3\") #load bgm \r\n pygame.mixer.music.play(-1)\r\n\r\n global FPS, bg_x, sk_x,W,H\r\n W, H = 600, 400\r\n HW, HH = W / 2, H / 2\r\n AREA = W * H\r\n FPS = 100\r\n bg_x = 0\r\n sk_x = 0\r\n\r\n global screen\r\n pygame.init()\r\n screen = pygame.display.set_mode((W,H))\r\n\r\n global skset,bgset,d,d1,d2,d3,d4,dset,e\r\n skset=[sky1,sky2]\r\n bgset=[background1,background2,background3,background4,background5]\r\n d = [[872,220,909],[1207,196,1248],[1655,218,1693],[2184,225,2218],[2676,189,2727],[3271,198,3317]]\r\n d1 = [[872,220,909],[1207,196,1248],[1655,218,1693],[2184,225,2218],[2676,189,2727],[3271,198,3317]]\r\n d2 = [[150,175,190],[414,221,595],[982,173,1021],[1411,185,1446],[1650,227,1781],[3222,175,3276]]\r\n d3 = [[546,141,613],[1177,153,1252],[1983,109,2009],[2741,177,3005],[3781,125,3850]]\r\n d4 = [[270,151,365],[666,178,908],[1706,226,2026],[3169,117,3301]]\r\n dset=[d1,d2,d3,d4]\r\n e=[e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10]\r\n\r\n global game_time, score, clock, image, pos_x, pos_y, david, next_bg, next_sk, ground_speed, sky_speed, flash_freq, z, op, energy,fly, flash_time, is_flash, frame, playing\r\n clock = pygame.time.Clock()\r\n image_width = 20 #if you change the image , you need to fill new image width \r\n pos_x = 50\r\n pos_y = 259-image_width\r\n david = David(pos_x, pos_y)\r\n next_bg=random.randint(0,len(dset)-1)\r\n next_sk=random.randint(0,1)\r\n ground_speed = 5\r\n sky_speed = 1\r\n flash_freq = FPS/8\r\n z = 0\r\n op = 1 # op change image of dino\r\n energy = 0\r\n fly = 0\r\n flash_time = 0\r\n is_flash = 0\r\n frame = 0\r\n score = 0\r\n game_time = 0\r\n playing = 1 # playing = 1 -> 
enter or replay\r\n\r\n\r\n    \r\nclass David():\r\n\r\n    def __init__(self, x, y):\r\n        self.x = x\r\n        self.y = y\r\n        self.isJump = False\r\n        self.jumpCount = 15\r\n        \r\n\r\n    def draw(self,x,op,fly, energy ,is_flash,frame,flash_freq):\r\n        myFont = pygame.font.SysFont(\"Times New Roman\", 18)\r\n        mytime = myFont.render(str(round(game_time,2))+'s',1,(0,0,0))\r\n        screen.blit(mytime,(x+20,int(self.y)+20))\r\n        if op==1:\r\n            screen.blit(dino1, (x,int(self.y)))\r\n        else:\r\n            screen.blit(dino2, (x,int(self.y)))\r\n        if fly:\r\n            screen.blit(shield, (x-20,int(self.y)-5))\r\n        else:\r\n            if is_flash:\r\n                if frame < flash_freq/2:\r\n                    screen.blit(shield, (x-20,int(self.y)-5))\r\n            \r\n        \r\n\r\n    def jump(self):\r\n        if self.isJump:\r\n            if self.jumpCount >= -15: \r\n                neg = 1\r\n                if self.jumpCount < 0:\r\n                    neg = -1\r\n                self.y -= self.jumpCount**2 * 0.1 * neg\r\n                self.jumpCount -= 0.5\r\n            else:\r\n                self.isJump = False\r\n                self.jumpCount = 15\r\n    \r\n    def check(self,x,z,d,bg_x):\r\n        global score\r\n        if ( (x>=d[z][0]) and (self.y>=d[z][1]) and (x<=d[z][2])):\r\n            die_music.play()\r\n            screen.blit(gameover,(0,0))\r\n            myFont = pygame.font.SysFont(\"Times New Roman\", 30)\r\n            mytime = myFont.render(\"Your score is : \"+str(score),1,(0,0,0))\r\n            screen.blit(mytime,(300,350))\r\n            pygame.mixer.music.stop( ); # end the original bgm before showing the lose screen\r\n            pygame.display.update( )#update the screen of result\r\n            pygame.mixer.music.load('lose_bgm.mp3')# load the lose bgm\r\n            pygame.mixer.music.play( )#play lose bgm (sad violin)\r\n            time.sleep(11)#wait for the whole song to play\r\n            return 0\r\n            #pygame.quit()\r\n            #sys.exit()\r\n        else:\r\n            return 1\r\n        \r\n#This is the teacher's reference code\r\ndef RCtime(RCpin): \r\n    reading = 0\r\n    GPIO.setup(RCpin,GPIO.OUT)\r\n    GPIO.output(RCpin,GPIO.LOW)\r\n    time.sleep(0.5)\r\n    GPIO.setup(RCpin,GPIO.IN)\r\n    while(GPIO.input(RCpin)==GPIO.LOW):\r\n        reading += 1\r\n    return reading\r\n\r\n\r\ndef game_loop():\r\n    \r\n    global dino1,dino2,background,background1,background2,background3,background4,background5,backgroundx\r\n    global e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10\r\n    global sky1,sky2\r\n    global gameover,shield\r\n    global jump_music,die_music,shield_music\r\n    global FPS, bg_x, sk_x,W,H\r\n    global screen\r\n    global skset,bgset,d,d1,d2,d3,d4,dset,e\r\n    global clock, image, pos_x, pos_y, david, next_bg, next_sk, ground_speed, sky_speed, flash_freq, z, op, energy,fly, flash_time, is_flash, frame, playing\r\n    global threshold,button_jump,button_shield\r\n    global score,game_time\r\n    \r\n\r\n    #button_jump.wait_for_press() #get start signal to start game\r\n    #button_shield.wait_for_press()#same as above\r\n    \r\n    while True:\r\n        score = score + ground_speed\r\n        game_time = game_time + round(6/FPS,2)\r\n        frame=(frame+1)%flash_freq\r\n        if button_jump.is_pressed==0:#this is jump\r\n            if not david.isJump:\r\n                jump_music.play()\r\n                david.isJump = True\r\n        if button_shield.is_pressed==0:#this is shield\r\n            if fly==0:\r\n                if energy>300 and (not is_flash):\r\n                    shield_music.play()\r\n                    fly = 1\r\n                    flash_time = energy\r\n                    ground_speed = ground_speed + 10\r\n            \r\n        \r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                sys.exit()\r\n            elif event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_q:\r\n                    sys.exit()\r\n        \r\n\r\n        clock.tick(FPS)\r\n        pressed_keys = pygame.key.get_pressed()\r\n        screen.blit(background, (bg_x,0))\r\n        screen.blit(bgset[next_bg], (bg_x+4000,0))\r\n        screen.blit(sky1,(sk_x,0))\r\n        screen.blit(skset[next_sk],(sk_x+4000,0))\r\n\r\n        \r\n        \r\n        if(z0:\r\n            
energy=max(energy-5,0)\r\n \r\n else:\r\n fly=0\r\n ground_speed = ground_speed-10\r\n is_flash = 1\r\n\r\n if is_flash:\r\n if flash_time>0:\r\n flash_time = flash_time-5\r\n else:\r\n is_flash = 0\r\n \r\n david.draw(pos_x,op,fly,energy,is_flash,frame,flash_freq)\r\n david.jump()\r\n bg_x = bg_x-ground_speed\r\n sk_x = sk_x-sky_speed\r\n \r\n if op==1: op=2 #change Dino picture for every loop \r\n else: op=1\r\n \r\n if bg_x<=-4000:\r\n if not fly:\r\n ground_speed = min(ground_speed+1,10)\r\n bg_x=bg_x+4000\r\n z=0\r\n if next_bg==0:\r\n background = pygame.image.load('bg.png')\r\n elif next_bg==1:\r\n background = pygame.image.load('bg2.png')\r\n elif next_bg==2:\r\n background = pygame.image.load('bg3.png')\r\n elif next_bg==3:\r\n background = pygame.image.load('bg4.png')\r\n elif next_bg==4:\r\n background = pygame.image.load('bg5.png')\r\n d=dset[next_bg][:]\r\n next_bg=random.randint(0,len(dset)-1)\r\n \r\n \r\n if sk_x <= -4000:\r\n sky_speed= min(sky_speed+1,5)\r\n sk_x = sk_x+4000\r\n if next_sk == 0:\r\n sky1 = pygame.image.load('sk1.png')\r\n elif next_sk==1:\r\n sky1 = pygame.image.load('sk2.png')\r\n next_sk = random.randint(0,1)\r\n screen.blit(e[int(energy/100)],(300,300))\r\n \r\n myFont = pygame.font.SysFont(\"Times New Roman\", 28)\r\n mytime = myFont.render(\"Your score is : \"+str(score),1,(0,0,0))\r\n screen.blit(mytime,(20,314))\r\n \r\n pygame.display.update()\r\n\r\ndef play_again():\r\n print('press jump to play again')\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if button_jump.is_pressed==0:\r\n return 1\r\n elif button_shield.is_pressed==0:\r\n return 0\r\n \r\n \r\ndef game_play():\r\n while True:\r\n rct = RCtime(25)\r\n print(rct)\r\n if rct < 23000:\r\n game_initialization()\r\n game_loop()\r\n pygame.quit()\r\n sys.exit()\r\n break\r\n \r\n\r\nif __name__ == '__main__':\r\n game_play()\r\n \r\n", "sub_path": "DAVID_rpi_vesion.py", "file_name": "DAVID_rpi_vesion.py", "file_ext": "py", "file_size_in_byte": 10972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "gpiozero.Button", "line_number": 7, "usage_type": "call"}, {"api_name": "gpiozero.Button", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 23, "usage_type": "attribute"}, {"api_name": 
"pygame.image.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 63, 
"usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 78, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 144, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 147, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 150, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 151, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 161, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 161, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 161, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 162, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 162, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 162, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 163, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 164, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 164, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 164, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.input", "line_number": 165, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 165, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 206, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 207, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 209, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 210, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 214, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 223, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 226, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 266, "usage_type": "call"}, {"api_name": "pygame.image", 
"line_number": 266, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 268, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 268, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 270, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 270, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 272, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 272, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 274, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 274, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 276, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 283, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 283, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 285, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 285, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 286, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 289, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 293, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 300, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 301, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 315, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 316, "usage_type": "call"}]}
+{"seq_id": "451461254", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('type_one', '0002_auto_20151022_1958'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ListFailureFiltering',\n fields=[\n ('listfailure_ptr', models.OneToOneField(auto_created=True, primary_key=True, to='type_one.ListFailure', parent_link=True, serialize=False)),\n ('station_name_1', models.ForeignKey(blank=True, null=True, verbose_name='Станция', to='type_one.Station')),\n ],\n bases=('type_one.listfailure', models.Model),\n ),\n ]\n", "sub_path": "view_failures/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}]}
+{"seq_id": "339455496", "text": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_pandas_style_constructor():\n a = ak.Array({\"x\": [1, 2, 3], \"y\": [[1, 2, 3], [], [4, 5]]})\n assert ak.to_list(a) == [\n {\"x\": 1, \"y\": [1, 2, 3]},\n {\"x\": 2, \"y\": []},\n {\"x\": 3, \"y\": [4, 5]},\n ]\n\n\npyarrow = pytest.importorskip(\"pyarrow\")\n\n\ndef test_pyarrow_constructor():\n a = ak.Array(pyarrow.array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]))\n assert ak.to_list(a) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n", "sub_path": "tests/test_0381-fill-documentation-stubs-3.py", "file_name": "test_0381-fill-documentation-stubs-3.py", "file_ext": "py", "file_size_in_byte": 623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "awkward.Array", "line_number": 9, "usage_type": "call"}, {"api_name": "awkward.to_list", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.importorskip", "line_number": 17, "usage_type": "call"}, {"api_name": "awkward.Array", "line_number": 21, "usage_type": "call"}, {"api_name": "awkward.to_list", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "564153685", "text": "#THIS SCRIPT FETCHES USER_IDs FROM DATABASE AND DOWNLOADS ALL THE FOLLOWERS OF EACH USER_ID\n#IN CASE OF THE API THRESHOLD IS REACHED IT WILL WAIT AND CALL THE API AGAIN IN 5 MINUTES\n#QUERY TO FETCH THE USER_IDs CAN BE CHANGED AS NEEDED\nimport oauth2\nimport json\nimport psycopg2 as pc\nimport psycopg2.extras\nimport time\nimport sys\n\n\nconn = pc.connect(\"host=YourHost user=YourUser password=YourPass dbname=YourDB\")\nc1 = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\nc2 = conn.cursor()\nc1.execute(\"WITH CTE AS (SELECT DISTINCT TWEET#>'{user,screen_name}' AS user_name,TWEET#>'{user,id}' AS user_id,(TWEET#>>'{user,followers_count}')::int as followers_count\\\n ,row_number() over(partition by TWEET#>'{user,screen_name}',TWEET#>'{user,id}' order by (TWEET#>>'{user,followers_count}')::int asc)\\\n FROM PARIS_ALL A WHERE EXISTS (SELECT TWEET_ID FROM HASHTAGS_PARIS_ALL WHERE TWEET_ID = A.TWEET_ID AND hashtag = 'ParisAttacks'\\\n ) AND NOT EXISTS (SELECT USER_ID FROM FOLLOWERS WHERE USER_ID = (A.TWEET#>>'{user,id}')::BIGINT)\\\n AND NOT EXISTS (SELECT USER_ID FROM FOLLOWERS_OF_FOLLOWERS WHERE USER_ID = (A.TWEET#>>'{user,id}')::BIGINT) AND RETWEETS > 0\\\n ORDER BY (TWEET#>>'{user,followers_count}')::int ASC)\\\n SELECT * FROM CTE WHERE ROW_NUMBER=1;\")\ndef oauth_req(url, key, secret, http_method='GET', post_body='', http_headers=None):\n consumer = oauth2.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\n token = oauth2.Token(key=key, secret=secret)\n client = oauth2.Client(consumer, token)\n resp, content = client.request( url, method=http_method, body=post_body, headers=http_headers )\n return content\nCONSUMER_KEY = \"YourConsumerKey\";\nCONSUMER_SECRET = \"YourConsumerSecret\";\nACCESS_TOKEN = \"YourAccessToken\";\nACCESS_SECRET = \"YourAccessSecret\";\n\nfor row in c1:\n cursor = -1\n print(row[\"user_name\"])\n response = oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET)\n followers = json.loads(response)\n #print(followers)\n i = 0\n while(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Rate limit exceeded'):\n print('Sleeping for 5 min...')\n time.sleep(300)\n followers = json.loads(oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET))\n print(followers)\n if(followers.has_key('errors') == True):\n print(str(followers['errors'][0]['message']))\n if(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Sorry, that page does not exist.'):\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],0)\n c2.execute(sql)\n if (followers.has_key(\"ids\")):\n print(len(followers['ids']))\n next_cursor = followers['next_cursor']\n for id in followers['ids']:\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],id)\n c2.execute(sql)\n i=i+1\n if(i%1000==0):\n print(i)\n while(next_cursor!=0):\n response = oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(next_cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET)\n followers = json.loads(response)\n #print(followers)\n while(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Rate limit exceeded'):\n print('Sleeping for 5 min...')\n time.sleep(300)\n followers = 
json.loads(oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(next_cursor,row[\"user_name\"]),ACCESS_TOKEN, ACCESS_SECRET))\n print(followers)\n next_cursor = followers['next_cursor']\n for id in followers['ids']:\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],id)\n c2.execute(sql)\n i=i+1\n if(i%10000==0):\n print(i)\n conn.commit()\n else:\n sql = \"INSERT INTO FOLLOWERS(USER_ID,FOLLOWER_ID) VALUES ({0},{1})\".format(row[\"user_id\"],0)\n c2.execute(sql)\n conn.commit()\n print(\"Profile private or deleted\")\nc1.close()\nc2.close()\nconn.close()\n \n \n\n", "sub_path": "twitter_search_api_followers.py", "file_name": "twitter_search_api_followers.py", "file_ext": "py", "file_size_in_byte": 4561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "psycopg2.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "psycopg2.extras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "oauth2.Consumer", "line_number": 23, "usage_type": "call"}, {"api_name": "oauth2.Token", "line_number": 24, "usage_type": "call"}, {"api_name": "oauth2.Client", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}]}
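+# A generic, self-contained sketch of the pattern the record above relies on:
+# cursor-based pagination with a sleep-and-retry backoff. fetch_page is a hypothetical
+# stand-in for the Twitter followers/ids request; real code would substitute the OAuth call.
+import time
+
+def fetch_all_ids(fetch_page, wait_seconds=300):
+    cursor, ids = -1, []
+    while cursor != 0:                  # Twitter marks the last page with next_cursor == 0
+        page = fetch_page(cursor)
+        while 'errors' in page:         # rate-limited: wait, then ask for the same page again
+            time.sleep(wait_seconds)
+            page = fetch_page(cursor)
+        ids.extend(page.get('ids', []))
+        cursor = page.get('next_cursor', 0)
+    return ids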
+{"seq_id": "131005703", "text": "##@namespace produtil.mpi_impl.mpi_impl_base\n# Utilities like CMDFGen to simplify adding new MPI implementations to the \n# produtil.run suite of modules.\n#\n# This module contains classes and functions to assist developers in\n# extending the functionality of the produtil.mpi_impl package. The\n# main highlight is the CMDFGen, which generates command files. Some\n# MPI implementations, and the mpiserial program, want to read a file\n# with one line per MPI rank telling what program to run on each rank.\n# For example, LSF+IBMPE and LoadLeveler+IBMPE work this way if one\n# wants to run different programs on different ranks.\n\nimport tempfile,stat,os, logging\n\nmodule_logger=logging.getLogger('produtil.mpi_impl')\n\nclass MPIConfigError(Exception): \n \"\"\"!Base class of MPI configuration exceptions.\"\"\"\nclass WrongMPI(MPIConfigError): \n \"\"\"!Unused: raised when the wrong MPI implementation is accessed. \"\"\"\nclass MPISerialMissing(MPIConfigError):\n \"\"\"!Raised when the mpiserial program is required, but is missing.\"\"\"\nclass MPIAllRanksError(MPIConfigError):\n \"\"\"!Raised when the allranks=True keyword is sent to mpirun or mpirunner,\nbut the MPI program specification has more than one rank.\"\"\"\nclass MPIMixed(MPIConfigError):\n \"\"\"!Thrown to indicate serial and parallel processes are being mixed in a single mpi_comm_world.\"\"\"\nclass MPIDisabled(MPIConfigError):\n \"\"\"!Thrown to MPI is not supported.\"\"\"\nclass OpenMPDisabled(MPIConfigError):\n \"\"\"!Raised when OpenMP is not supported by the present implementation.\"\"\"\nclass CMDFGen(object):\n \"\"\"!Generates files with one line per MPI rank, telling what\n program to run on each rank.\n\n This class is used to generate command files for mpiserial, poe or\n mpirun.lsf. Command files are files with one MPI rank per line\n containing a shell command to run for that rank. Generally the\n input (lines) is generated by the to_arglist function in a\n subclass of produtil.mpiprog.MPIRanksBase. See the\n produtil.mpi_impl.mpirun_lsf for an example of how to use this.\"\"\"\n def __init__(self,base,lines,cmd_envar='SCR_CMDFILE',\n model_envar=None,filename_arg=False,**kwargs):\n \"\"\"!CMDFGen constructor\n \n @param base type of command file being generated. See below.\n @param lines the command file contents as a list of strings, one per line\n @param cmd_envar environment variable to set to the command file path\n @param model_envar environment variable to set to \"MPMD\" \n @param kwargs Sets the command file name. See below.\n @param filename_arg If True, the name of the command file is appended to the program argument list.\n\n The command file is generated from\n tempfile.NamedTemporaryFile, passing several arguments from\n kwargs, if provided, or suitable defaults otherwise. There\n are several arguments used. 
In all cases, replace \"base\" with\n        the contents of the @c base argument:\n\n        * base_suffix --- temporary file suffix (default: \"base.\")\n        * base_prefix --- temporary file prefix (default: \".cmdf\")\n        * base_tempdir --- directory in which to create the file\n\n        @bug The base_suffix keyword is used for both the suffix and prefix\"\"\"\n        assert(base is not None)\n        assert(isinstance(lines,list))\n        assert(len(lines)>0)\n        assert(isinstance(lines[0],basestring))\n        assert(len(lines[0])>0)\n        self.filename=kwargs.get(str(base),None)\n        self.tmpprefix=kwargs.get('%s_suffix'%(base,),'%s.'%(base,))\n        self.tmpsuffix=kwargs.get('%s_suffix'%(base,),'.cmdf')\n        self.tmpdir=kwargs.get('%s_tmpdir'%(base,),'.')\n        self.cmd_envar=cmd_envar\n        self.model_envar=model_envar\n        self.filename_arg=filename_arg\n        out='\\n'.join(lines)\n        if len(out)>0:\n            out+='\\n'\n        self.cmdf_contents=out\n        return\n    ##@var filename\n    # command file's filename\n\n    ##@var tmpprefix \n    # temporary file prefix\n    \n    ##@var tmpsuffix\n    # temporary file suffix\n\n    ##@var tmpdir\n    # temporary file directory\n\n    ##@var cmd_envar\n    # Environment variable to set telling the path to the\n    # command file\n\n    ##@var model_envar\n    # Environment variable to set to \"MPMD\"\n\n    ##@var cmdf_contents\n    # String containing the command file contents.\n\n    def _add_more_vars(self,envars,logger):\n        \"\"\"!Adds additional environment variables to the envars dict,\n        needed to configure the MPI implementation correctly. This is\n        used to set MP_PGMMODEL=\"MPMD\" if the constructor receives\n        model_envar=\"MP_PGMMODEL\".\n\n        @param envars[out] the dict to modify\n        @param logger a logging.Logger for log messages\"\"\"\n        if self.model_envar is not None:\n            if logger is not None:\n                logger.info('Set %s=\"MPMD\"'%(self.model_envar,))\n            envars[self.model_envar]='MPMD'\n    def __call__(self,runner,logger=None):\n        \"\"\"!Adds the environment variables to @c runner and creates the command file.\n\n        @param[out] runner A produtil.prog.Runner to modify\n        @param logger a logging.Logger for log messages\"\"\"\n        if logger is None: logger=module_logger\n        if self.filename is not None:\n            with open(self.filename,'wt') as f:\n                f.write(self.cmdf_contents)\n            if logger is not None:\n                logger.info('Write command file to %s'%(repr(self.filename),))\n            kw={self.cmd_envar: self.filename}\n            self._add_more_vars(kw,logger)\n            if logger is not None:\n                for k,v in kw.iteritems():\n                    logger.info('Set %s=%s'%(k,repr(v)))\n            if self.filename_arg:\n                runner=runner[self.filename]\n            return runner.env(**kw)\n        else:\n            with tempfile.NamedTemporaryFile(mode='wt',suffix=self.tmpsuffix,\n                prefix=self.tmpprefix,dir=self.tmpdir,delete=False) as t:\n                if logger is not None:\n                    logger.info('Write command file to %s'%(repr(t.name),))\n                t.write(self.cmdf_contents)\n                # Make the file read-only and readable for everyone:\n                os.fchmod(t.fileno(),stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)\n                kw={self.cmd_envar: t.name}\n                self._add_more_vars(kw,logger)\n                if logger is not None:\n                    for k,v in kw.iteritems():\n                        logger.info('Set %s=%s'%(k,repr(v)))\n                runner.env(**kw)\n                if self.filename_arg:\n                    runner=runner[t.name]\n            return runner\n", "sub_path": "NEMS/tests/produtil/ush/produtil/mpi_impl/mpi_impl_base.py", "file_name": "mpi_impl_base.py", "file_ext": "py", "file_size_in_byte": 6664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 135, "usage_type": 
"call"}, {"api_name": "os.fchmod", "line_number": 141, "usage_type": "call"}, {"api_name": "stat.S_IRUSR", "line_number": 141, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 141, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 141, "usage_type": "attribute"}]}
+{"seq_id": "462173021", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 8 13:19:54 2021\r\n\r\n@author: 91937\r\n\"\"\"\r\n\r\n\r\n# implementation of queue\r\n# based on first in first out\r\n# enqueue opration means insertion \r\n# dequeue opration means deletion\r\n\r\n\r\n# implementation using list\r\nclass queue:\r\n def __init__(self):\r\n self.q = []\r\n \r\n def enqueue(self,item):\r\n self.q.insert(0,item)\r\n \r\n def dequeue(self):\r\n self.q.pop() \r\n# we can define seek and length function also\r\n# we are inserting 1 then 2 then 3\r\n# so in queue -----> 3--2--1 \r\na = queue()\r\na.enqueue(1)\r\na.enqueue(2)\r\na.enqueue(3)\r\nprint(a.q)\r\n# perform dequeue operation on the queue so output will be ----> 3--2 \r\na.dequeue()\r\nprint(a.q)\r\n\r\n\r\n\r\n# implementing using collection module\r\nfrom collections import deque\r\nq1 = deque()\r\nq1.appendleft('akash') \r\nq1.appendleft('ashok') \r\nq1.appendleft('kamerkar')\r\nprint(q1) \r\nq1.pop()\r\nprint(q1)\r\n", "sub_path": "python_basic_queue_using_deque_list.py", "file_name": "python_basic_queue_using_deque_list.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.deque", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "177119114", "text": "from flask import render_template\nimport connexion\n\n# Создадим экземпляр приложения\napp = connexion.App(__name__, specification_dir='./')\n# Прочитаем файл swagger.yml для настройки конечных точек\napp.add_api('swagger.yml')\n\n\n# Создадим маршрут URL в нашем приложении для \"/\"\n@app.route('/')\ndef home():\n \"\"\"\n Эта функция просто отвечает на URL \"localhost:5000/\" в браузера\n\n :return: подствляет шаблон 'home.html'\n \"\"\"\n return render_template('home.html')\n\n\n# Если мы работаем в автономном режиме, запускаем приложение\nif __name__ == '__main__':\n app.run(host='localhost', port=1356, debug=True)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "connexion.App", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}]}
+{"seq_id": "374888412", "text": "import matplotlib.pyplot as plt\n\n\na=[0,1,2,3,4,5,6,7]\nb=[10,12,13,14,15,12,16,12]\nc=[20,28,26,24,26,27,21,28]\nd=[41,47,49,45,43,49,45,46]\nplt.title(\"My Graph\")\nplt.plot(a,label=\"first\")\nplt.plot(a,c,label=\"second\")\nplt.plot(a,d,label=\"third\",lw=5)\nplt.legend(loc=\"best\",shadow=True)\n\nplt.grid()\nplt.show()\n\n", "sub_path": "requirements/venky_task/AI/matplotlib/2.py", "file_name": "2.py", "file_ext": "py", "file_size_in_byte": 307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.title", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "275255090", "text": "#!/bin/python\n\nimport requests\nimport pprint\n\nhost = \"10.0.1.103\"\n#host = \"10.0.1.124\"\n\nurl = 'https://cifarelli.net/alexa/temps'\n#url = 'http://%s:8080/alexaskills/' % host\n#url = 'http://%s:8080/alexaskills/rest/temps' % host\n#url = 'http://%s:8080/alexaskills/rest/temps/7fc6eb1a-2af1-40ca-8706-d9c546c93ea9' % host\n#url = 'http://%s:8080/alexaskills/rest/temps/Attic Temperature' % host\ndata = \"\"\"{\"type\":\"SmartThings\",\n \"title\":\"Secure\", \n \"description\":\n \"\"}\"\"\"\nheaders = {\"Content-Type\": \"application/json\"}\nresp = requests.get(url, headers=headers)\n#resp = requests.post(url, headers=headers, data=data)\nprint(\"Status returned: \", resp.status_code);\nif (resp.headers['content-type'] == 'application/json'):\n pp = pprint.PrettyPrinter(indent=3);\n pp.pprint(resp.json());\nelse:\n print(resp.text);\n\n", "sub_path": "python/alexa_test.py", "file_name": "alexa_test.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "pprint.PrettyPrinter", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "280181508", "text": "import pymongo\nfrom word2vec import LineSentence\n\n# in_file_name = '../wiki_ja/wakati/jawiki-100.txt'\nin_file_name = '../wiki_ja/wakati/jawiki-20160407-pages-articles-multistream.xml-001.txt'\n# in_file = open(in_file_name, 'r')\nline_sentence = LineSentence(in_file_name)\ncollection = pymongo.MongoClient()['wikipedia']['sentence']\ncollection.create_index([('sent_id', pymongo.ASCENDING)],\n unique=True)\n\nfor sent_num, line in enumerate(line_sentence):\n sent_id = 'sent_{}'.format(sent_num)\n collection.insert({'sent_id': sent_id,\n 'content': line})\n\n", "sub_path": "lang_analysis/sentence2vec/save_sentence.py", "file_name": "save_sentence.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "word2vec.LineSentence", "line_number": 7, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "pymongo.ASCENDING", "line_number": 9, "usage_type": "attribute"}]}
+{"seq_id": "202362376", "text": "from django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('iconic/', views.iconic, name=\"iconic\"),\n path('events/', views.events, name=\"events\"),\n path('experience/', views.experience, name=\"experience\"),\n path('festival/', views.festival, name=\"festival\"),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)", "sub_path": "apps/nearby/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}]}
+{"seq_id": "49976515", "text": "from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport unittest\nfrom mobile.home import HomeMobile\nfrom mobile.payment import PaymentMobile\n\nclass PD221Mobile(unittest.TestCase):\n \n def setUp(self):\n self.driver = webdriver.Chrome(\"E:\\ourdeal\\selenium\\chromedriver\")\n \n def tearDown(self):\n pass\n \n def xtest_credit_card_transaction_with_existing_member(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/parcfitness-3-month-gym-membership-bellevue-hill-rose-bay-and-gymtime-balgowlah-locations-feb\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(2)\n paymentMobile.enterDealerLocation()\n paymentMobile.applyCredit()\n #paymentMobile.enterPhone()\n #paymentMobile.agreeTC()\n #payflowMobile = paymentMobile.clickCheckOutWithCreditCard()\n #payflowMobile.enterCreditCardDetail()\n \n \n def xtest_credit(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/parcfitness-3-month-gym-membership-bellevue-hill-rose-bay-and-gymtime-balgowlah-locations-feb\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(2)\n paymentMobile.enterDealerLocation()\n paymentMobile.applyCredit()\n \n def xtest_discount_code(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/parcfitness-3-month-gym-membership-bellevue-hill-rose-bay-and-gymtime-balgowlah-locations-feb\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(3)\n paymentMobile.enterDealerLocation()\n paymentMobile.applyDiscountCode()\n \n \n def xtest_discount_code_plus_credit(self):\n driver = self.driver\n h = HomeMobile(driver,\"http://richard.ourdeal.com.au/deal/nifty-spot-in-car-iphone--to-stereo-transmitter\")\n paymentMobile = h.clickBuy()\n \n paymentMobile.login()\n paymentMobile.selectQuantity(5)\n paymentMobile.enterShippingDetails()\n paymentMobile.enterDealerLocation()\n paymentMobile.enterCustomData()\n paymentMobile.tickOptInText(0)\n #paymentMobile.applyDiscountCode()\n #paymentMobile.applyCredit()\n paymentMobile.enterPhone()\n payFlowPage = paymentMobile.clickCheckOutWithCreditCard()\n payFlowPage.enterCreditCardDetail()\n \n \n \n ", "sub_path": "PD-221/PD221Mobile.py", "file_name": "PD221Mobile.py", "file_ext": "py", "file_size_in_byte": 2744, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "mobile.home.HomeMobile", "line_number": 20, "usage_type": "call"}, {"api_name": "mobile.home.HomeMobile", "line_number": 35, "usage_type": "call"}, {"api_name": "mobile.home.HomeMobile", "line_number": 45, "usage_type": "call"}, {"api_name": "mobile.home.HomeMobile", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "412047977", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('team_builder', '0004_auto_20160208_2217'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='team',\n options={'verbose_name': 'Team', 'verbose_name_plural': 'Teams'},\n ),\n migrations.AddField(\n model_name='club',\n name='slug',\n field=models.SlugField(max_length=100, null=True, blank=True),\n ),\n ]\n", "sub_path": "FancyArena/apps/team_builder/migrations/0005_auto_20160222_1137.py", "file_name": "0005_auto_20160222_1137.py", "file_ext": "py", "file_size_in_byte": 577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "400385145", "text": "from keras.layers import Bidirectional, LSTM, Input, RepeatVector, Dense\nfrom keras.layers import GlobalAveragePooling1D, concatenate, GlobalMaxPooling1D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.models import Model\nfrom keras.layers.core import Activation\n\nfrom model_architectures.attention import AttentionLayer, AttentionWithContext\n\nclass SentencePair:\n def __init__(self, hidden_size=512, no_classes=1, use_attention=False):\n self.hidden_size = hidden_size\n self.no_classes = no_classes\n\n self.use_attention = use_attention\n\n def build(self, input_shape=[(400, 256), (256,)]):\n story_input = Input(shape=input_shape[0], name='story_input')\n x1 = Bidirectional(LSTM(self.hidden_size, return_sequences=True, kernel_initializer='glorot_uniform'), name='Bidirectional-1')(story_input)\n x1 = Bidirectional(LSTM(self.hidden_size, return_sequences=True, kernel_initializer='glorot_uniform'), name='Bidirectional-2')(x1)\n\n if self.use_attention:\n #x1 = AttentionLayer(x1)\n x1 = AttentionWithContext()(x1)\n else:\n x2 = GlobalMaxPooling1D()(x1)\n x1 = GlobalAveragePooling1D()(x1)\n x1 = concatenate([x1, x2])\n\n section_input = Input(shape=input_shape[1], name='section_input')\n x2 = Dense(self.hidden_size*2, kernel_initializer='glorot_uniform', name='Dense-1')(section_input)\n x2 = LeakyReLU(0.2)(x2)\n\n x = concatenate([x1, x2])\n\n x = Dense(self.hidden_size*2, kernel_initializer='glorot_uniform', name='Dense-2')(x)\n x = LeakyReLU(0.2)(x)\n x = Dense(self.hidden_size*2, kernel_initializer='glorot_uniform', name='Dense-3')(x)\n x = LeakyReLU(0.2)(x)\n\n x = Dense(self.no_classes, kernel_initializer='glorot_uniform', name='output')(x)\n x = Activation('sigmoid', name='sigmoid')(x)\n\n return Model(inputs=[story_input, section_input], outputs=x)\n\nif __name__ == '__main__':\n model = SentencePair(use_attention=False)\n model = model.build()\n model.summary()\n", "sub_path": "model_architectures/sentence_pair.py", "file_name": "sentence_pair.py", "file_ext": "py", "file_size_in_byte": 2078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.layers.Input", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 19, "usage_type": "call"}, {"api_name": "model_architectures.attention.AttentionWithContext", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, 
"usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "643529171", "text": "from PIL import Image, ImageFile\nimport fileModule\nimport io, time\n\ndef main(args):\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n fm = fileModule.FileManager()\n begin = time.time()\n data = fm.loadFile(args[\"id\"])\n elapsed = time.time() - begin\n print (\"read Time \" + repr(elapsed))\n image = Image.open(io.BytesIO(data.read()))\n\n image.thumbnail((200, 200), Image.ANTIALIAS)\n newImage = io.BytesIO()\n image.save(newImage, args[\"formatOut\"])\n newImage.seek(0)\n begin = time.time()\n retId = fm.saveFile(newImage, \"image.\" + args[\"formatOut\"])\n elapsed = time.time() - begin\n print (\"write Time \" + repr(elapsed))\n return {\"retId\": retId}\n", "sub_path": "sample_actions/images/resize.py", "file_name": "resize.py", "file_ext": "py", "file_size_in_byte": 678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 6, "usage_type": "name"}, {"api_name": "fileModule.FileManager", "line_number": 7, "usage_type": "call"}, {"api_name": "time.time", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "429923724", "text": "from flask import request, render_template, redirect, url_for, flash\nfrom flask_login import login_required\nfrom app.main import main\nfrom app.main.forms import CreateKeyForm\nfrom app import api_key_api_client\nfrom app.utils import user_has_permissions\n\n\n@main.route(\"/services//api-keys\")\n@login_required\n@user_has_permissions('manage_api_keys')\ndef api_keys(service_id):\n return render_template(\n 'views/api-keys.html',\n keys=api_key_api_client.get_api_keys(service_id=service_id)['apiKeys']\n )\n\n\n@main.route(\"/services//api-keys/create\", methods=['GET', 'POST'])\n@login_required\n@user_has_permissions('manage_api_keys')\ndef create_api_key(service_id):\n key_names = [\n key['name'] for key in api_key_api_client.get_api_keys(service_id=service_id)['apiKeys']\n ]\n form = CreateKeyForm(key_names)\n if form.validate_on_submit():\n secret = api_key_api_client.create_api_key(\n service_id=service_id,\n key_name=form.key_name.data,\n key_type=form.key_type.data\n )\n return render_template('views/api-keys/show.html', secret=secret,\n key_name=form.key_name.data)\n return render_template(\n 'views/api-keys/create.html',\n form=form\n )\n\n\n@main.route(\"/services//api-keys/revoke/\", methods=['GET', 'POST'])\n@login_required\n@user_has_permissions('manage_api_keys')\ndef revoke_api_key(service_id, key_id):\n key_name = api_key_api_client.get_api_keys(service_id=service_id, key_id=key_id)['apiKeys'][0]['name']\n if request.method == 'GET':\n return render_template(\n 'views/api-keys/revoke.html',\n key_name=key_name\n )\n elif request.method == 'POST':\n api_key_api_client.revoke_api_key(service_id=service_id, key_id=key_id)\n flash('‘{}’ was revoked'.format(key_name), 'default_with_tick')\n return redirect(url_for('.api_keys', service_id=service_id))\n", "sub_path": "app/main/views/api_keys.py", "file_name": "api_keys.py", "file_ext": "py", "file_size_in_byte": 1992, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "app.api_key_api_client.get_api_keys", "line_number": 15, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 15, "usage_type": "name"}, {"api_name": "app.main.main.route", "line_number": 9, "usage_type": "call"}, {"api_name": "app.main.main", "line_number": 9, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 10, "usage_type": "name"}, {"api_name": "app.utils.user_has_permissions", "line_number": 11, "usage_type": "call"}, {"api_name": "app.api_key_api_client.get_api_keys", "line_number": 24, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 24, "usage_type": "name"}, {"api_name": "app.main.forms.CreateKeyForm", "line_number": 26, "usage_type": "call"}, {"api_name": "app.api_key_api_client.create_api_key", "line_number": 28, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "app.main.main.route", "line_number": 19, "usage_type": "call"}, {"api_name": "app.main.main", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 20, "usage_type": "name"}, {"api_name": "app.utils.user_has_permissions", "line_number": 21, "usage_type": 
"call"}, {"api_name": "app.api_key_api_client.get_api_keys", "line_number": 45, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "app.api_key_api_client.revoke_api_key", "line_number": 52, "usage_type": "call"}, {"api_name": "app.api_key_api_client", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 54, "usage_type": "call"}, {"api_name": "app.main.main.route", "line_number": 41, "usage_type": "call"}, {"api_name": "app.main.main", "line_number": 41, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 42, "usage_type": "name"}, {"api_name": "app.utils.user_has_permissions", "line_number": 43, "usage_type": "call"}]}
+{"seq_id": "587965867", "text": "from multiprocessing import Pool\nimport asyncio\nimport time\n\n\nasync def test(time):\n print(\"start\")\n await asyncio.sleep(time) # 调用协程内的时间间隔\n print(\"end\")\n\n\nasync def main(num):\n start_time = time.time()\n tasks = [asyncio.create_task(test(3)) for proxy in range(num)] # 注册任务列表\n print(len(tasks))\n print(\"协程结束时间:\", time.time() - start_time)\n\n\ndef run(num):\n asyncio.run(main(num)) # 使用协程调用方法\n\n\nif __name__ == \"__main__\":\n \"\"\"\n start_time = time.time()\n p = Pool()\n # 启动多个进程,在每个进程内运行协程任务\n for i in range(4):\n # apply(): 阻塞主进程, 并且一个一个按顺序地执行子进程, 等到全部子进程都执行完毕后 ,继续执行 apply()后面主进程的代码\n # apply_async() 非阻塞异步的, 他不会等待子进程执行完毕, 主进程会继续执行, 他会根据系统调度来进行进程切换\n p.apply_async(run, args=(10,))\n p.close()\n p.join()\n print(\"进程结束时间:\", time.time() - start_time)\n\n \"\"\"\n run(10)\n", "sub_path": "week07/note/进程与协程.py", "file_name": "进程与协程.py", "file_ext": "py", "file_size_in_byte": 1122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "asyncio.sleep", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 13, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "392668981", "text": "from queue_req_resp import RabbitMQ\nimport queue\nimport logging\nfrom logging.handlers import QueueHandler, QueueListener\nimport datetime\n\nclass LoggerClient():\n def __init__(self,logfile_path,console=False):\n \"\"\"\n Logger API at Client Side to store the logs locally and sent to Central Logger MQ\n Parameters - RMQ - Create a RabbitMQ Object and pass it \n - logfile_path - Path where to create log file\n - console - whether to diaplay log messages on screen - Default false\n \"\"\"\n self.RMQ = RabbitMQ()\n #Creating queue and logger\n self.log_queue = queue.Queue(-1) #infinite size\n self.queue_handler = QueueHandler(self.log_queue)\n self.logger = logging.getLogger()\n self.logger.addHandler(self.queue_handler)\n #formatter\n self.formatter = logging.Formatter(' %(message)s')\n #file handler - write to file\n self.file_handler_loc = logging.FileHandler(logfile_path)\n self.file_handler_loc.setFormatter(self.formatter)\n #console handler - print on screen\n if(console == True):\n self.console_handler = logging.StreamHandler()\n self.console_handler.setFormatter(self.formatter)\n self.listener = QueueListener(self.log_queue,self.console_handler,self.file_handler_loc )\n else:\n self.listener = QueueListener(self.log_queue,self.file_handler_loc )\n\n\n def start_logger(self):\n self.listener.start()\n\n def emit(self, record):\n return self.queue_handler.emit(record)\n\n def __del__(self):\n self.listener.stop()\n\n def log(self,msg):\n time=datetime.datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n msg=\"[\"+time+\"] : \"+msg\n self.logger.error(msg)\n msg+=\"\\n\"\n self.RMQ.send(\"\", \"To_Log\", msg)\n\n###README\n#Create RabbitMQ Object\n#Create LoogerClient Object by passing required parameters\n#call start_logger() using this Object\n#Now you can use this object to log - call Object.log(msg)\n#The log message will be saved along with date in local log file (file_path in parameters) and send to central logger through queue (and also to the console based on parameter passed)\n#Example: (test_logclient.py)\n#----------------------------\n# from logger_client import LoggerClient\n# from queue_req_resp import RabbitMQ\n# import time\n\n# def test():\n# LC = LoggerClient(\"./test_log.log\",console=True)\n# LC.start_logger()\n# for i in range(4):\n# time.sleep(1)\n# LC.log('This is a warning message')\n# LC.log('This is an error message') \n# return \n\n# test()", "sub_path": "Logger/logger_client.py", "file_name": "logger_client.py", "file_ext": "py", "file_size_in_byte": 2643, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "queue_req_resp.RabbitMQ", "line_number": 15, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.handlers.QueueHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.handlers.QueueListener", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.handlers.QueueListener", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": 
"attribute"}]}
+{"seq_id": "265443078", "text": "from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.template.loader import render_to_string\n#from django.core.cache import cache\nfrom django.template.defaultfilters import escape\n\n\nfrom modules.templatetags.module_filters import truncate_chars_by_words\nfrom django.utils.html import strip_tags\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef list_pages(context):\n website = context['request'].website\n pages = website.get_pages()\n return mark_safe(render_to_string(\"pages/list_pages.html\", {\"pages\": pages}))\n\n\ndef get_meta_content(meta_content, modules, context):\n \"\"\"To-do: write docs string here\"\"\"\n if not meta_content:\n meta_content = ''\n article_meta_variables = [':articleTitle', ':articleDescription', ':articleThumb']\n topic_meta_variables = [':topicThumb', ':topicName', ':TopicDescription']\n image_meta_variables = [':imageURL', ':imageDescription', ':imageIndex']\n all_meta_variables = [':currentURL'] + article_meta_variables + topic_meta_variables + image_meta_variables\n\n for meta_variable in all_meta_variables:\n if meta_content.find(meta_variable) > -1:\n for module in modules:\n # Article related meta\n if meta_variable in article_meta_variables and module.module_type == \"article\":\n article = module.render_article(context, render=False)\n if article:\n if meta_variable == ':articleTitle':\n meta_content = meta_content.replace(':articleTitle', article.title)\n if meta_variable == ':articleDescription':\n meta_content = meta_content.replace(':articleDescription',\n escape(truncate_chars_by_words(strip_tags(article.description), 500)))\n if meta_variable == ':articleThumb':\n meta_content = meta_content.replace(':articleThumb', article.thumbnail)\n break\n # Topic related meta\n if meta_variable in topic_meta_variables and module.module_type == \"topic-name\":\n topic = module.render_topic_name(context, render=False)\n if topic:\n if meta_variable == ':topicName':\n meta_content = meta_content.replace(':topicName', topic.name)\n if meta_variable == ':TopicDescription':\n meta_content = meta_content.replace(':TopicDescription',\n escape(truncate_chars_by_words(strip_tags(topic.description), 500)))\n if meta_variable == ':topicThumb':\n meta_content = meta_content.replace(':topicThumb', topic.image_url)\n break\n\n # Image related meta\n if meta_variable in image_meta_variables and module.module_type == \"image-gallery\":\n image_gallery_data = module.render_image_gallery(context, render=False)\n try:\n image = image_gallery_data['image']\n if meta_variable == ':imageURL':\n meta_content = meta_content.replace(':imageURL', image.link)\n if meta_variable == ':imageDescription':\n meta_content = meta_content.replace(':imageDescription',\n escape(truncate_chars_by_words(strip_tags(image.description), 500)))\n if meta_variable == ':imageIndex':\n meta_content = meta_content.replace(':imageIndex', str(image.order))\n except:\n pass\n break\n\n # set current url\n if meta_variable == ':currentURL':\n meta_content = meta_content.replace(':currentURL', context['request'].build_absolute_uri())\n return meta_content\n\n\n@register.simple_tag(takes_context=True)\ndef page_seo(context):\n page = context['page']\n modules = context['modules']\n cache_key = page.get_cache_key(context, 'seo')\n page_meta_html = page.get_cache(context, cache_key=cache_key)\n if not page_meta_html:\n page_metas = \"\"\n for meta in page.metas.all():\n meta_name = meta.name.strip()\n 
meta_content = meta.content.strip()\n            if meta_content:\n                meta_content = get_meta_content(meta_content, modules, context)\n            page_metas += '<meta name=\"%s\" content=\"%s\" />\\n' % (meta_name, meta_content)\n        page_meta_html = page_metas\n        page.set_cache(context, page_meta_html, cache_key=cache_key)\n\n    return mark_safe(page_meta_html)\n\n\n@register.simple_tag(takes_context=True)\ndef page_title(context):\n    \"\"\"\n    get page title meta\n    it sets a context variable context['page_title_html']\n    if it gets a title it sets the context variable,\n    otherwise the variable stays empty\n\n    Example :\n    {% page_title %}\n    {% if page_title_html %}\n    {{ page_title_html }}\n    {% else %}\n    EntertaiNow.com News Network\n    {% endif %}\n\n    \"\"\"\n    page = context['page']\n    modules = context['modules']\n    page_title = \"\"\n    if page.page_title:\n        page_title = page.page_title\n        cache_key = page.get_cache_key(context, 'title')\n        html_title = page.get_cache(context, cache_key=cache_key)\n        if not html_title:\n            page_title = get_meta_content(page_title, modules, context)\n            page.set_cache(context, page_title, cache_key=cache_key)\n            context['page_title_html'] = page_title\n        else:\n            context['page_title_html'] = html_title\n    else:\n        context['page_title_html'] = page_title\n    return \"\"", "sub_path": "pages/templatetags/page_tags.py", "file_name": "page_tags.py", "file_ext": "py", "file_size_in_byte": 5860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.template.Library", "line_number": 11, "usage_type": "call"}, {"api_name": "django.template", "line_number": 11, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 18, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 18, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters", "line_number": 32, "usage_type": "name"}, {"api_name": "django.template.defaultfilters.escape", "line_number": 41, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters.truncate_chars_by_words", "line_number": 41, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 41, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.escape", "line_number": 53, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters.truncate_chars_by_words", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 53, "usage_type": "call"}, {"api_name": "django.template.defaultfilters.escape", "line_number": 67, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters.truncate_chars_by_words", "line_number": 67, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 67, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters", "line_number": 83, "usage_type": "name"}, {"api_name": "modules.templatetags.module_filters", "line_number": 92, "usage_type": "argument"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 97, "usage_type": "call"}, {"api_name": "modules.templatetags.module_filters", "line_number": 118, "usage_type": "name"}, {"api_name": "modules.templatetags.module_filters", "line_number": 125, "usage_type": "argument"}]}
+{"seq_id": "272972854", "text": "__author__ = \"Jeremy Nelson\"\n\nfrom tensorflow.keras.layers import Flatten, Dense # type: ignore\nfrom tensorflow.keras.models import Sequential # type: ignore\n\nfrom config import AIKI_NAMES, CLASS_NAMES, IMG_HEIGHT, IMG_WIDTH\n\n\ndef feedforward_model(class_names: list = CLASS_NAMES) -> Sequential:\n model = Sequential([\n # input layer\n Flatten(input_shape=(IMG_HEIGHT, IMG_WIDTH, 1)),\n # first hidden layer\n Dense(64, activation='relu'),\n # second hidden layer\n Dense(64, activation='relu'),\n # output layer\n Dense(len(class_names), activation='softmax')\n ])\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n # Model Summary\n model.summary()\n return model\n\n\ndef aiki_feedforward() -> Sequential:\n return feedforward_modal(AIKI_NAMES)\n\n\ndef digits_feedforward() -> Sequential:\n digits = [i for i in range(0, 10)]\n return feedforward_modal(digits)\n", "sub_path": "models/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "config.CLASS_NAMES", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 12, "usage_type": "call"}, {"api_name": "config.IMG_HEIGHT", "line_number": 12, "usage_type": "name"}, {"api_name": "config.IMG_WIDTH", "line_number": 12, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 9, "usage_type": "name"}, {"api_name": "config.AIKI_NAMES", "line_number": 30, "usage_type": "argument"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "654426845", "text": "from collections import Counter\n\nclass Solution:\n def majorityElement(self, nums: [int]) -> [int]:\n count = Counter()\n for num in nums:\n count[num] += 1\n if len(count) == 3:\n new_count = Counter()\n for elem, freq in count.items(): \n if freq != 1: new_count[elem] = freq - 1\n count = new_count\n \n cands = Counter(num for num in nums if num in count) \n return [num for num in cands if cands[num] > len(nums)/3]", "sub_path": "majorityElement.py", "file_name": "majorityElement.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "collections.Counter", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "436748428", "text": "import cv2\nimport matplotlib.pyplot as plt\n\ndef readimage(arg):\n img = cv2.imread(arg)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n #BGR to RGB\n#Bilateral filter is a Non linear filter prevents averaging across image edges, while averaging within smooth regions of the image,\n# and so is edge-preserving, while Gaussian Filter is not.\n\ndef contours(image):\n img_blur = cv2.bilateralFilter(image, d = 7,\n sigmaSpace = 75, sigmaColor =75)\n # Convert to grayscale\n #cv2.imshow('img', image)\n img_gray = cv2.cvtColor(img_blur, cv2.COLOR_RGB2GRAY)\n # Apply the thresholding\n a = img_gray.max()\n _, thresh = cv2.threshold(img_gray, a/2+60, a,cv2.THRESH_BINARY_INV)\n\n contours, hierarchy = cv2.findContours(\n image = thresh,\n mode = cv2.RETR_TREE,\n method = cv2.CHAIN_APPROX_SIMPLE)\n\n contours = sorted(contours, key = cv2.contourArea, reverse = True)\n\n c_0 = contours[1]\n # Obtaining the 4 points of the bounding rectangle\n # x, y, w, h = cv2.boundingRect(c_0)\n # img_copy2 = image.copy()\n # img_box = cv2.rectangle(img_copy2, (x, y), (x+w, y+h), color = (0, 255, 0), thickness = 6)\n return c_0\n\nif __name__ == '__main__':\n from sys import argv\n if len(argv) < 2:\n print (\"Usage: python %s \" % argv[0])\n exit()\n img = readimage(argv[1])\n x,y,w,h,contour_img = contours(img)\n#Region of Interest - only the business card area is selected\n roi = img[y:y+h, x:x+w]\n # cv2.imwrite(\"roi.png\", roi)\n\n plt.subplot(1, 2, 1)\n plt.imshow(img)\n plt.axis('off')\n plt.title('Original')\n\n plt.subplot(1, 2, 2)\n plt.imshow(contour_img)\n plt.axis('off')\n plt.title('Contour Image')\n\n plt.show()\n", "sub_path": "CardScanner/Contours.py", "file_name": "Contours.py", "file_ext": "py", "file_size_in_byte": 1818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.bilateralFilter", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 38, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]}
+{"seq_id": "542461692", "text": "import sys\nimport os\nimport tensorflow as tf\nimport model\nimport joblib\nimport numpy as np\nimport jieba\nfrom tensorflow.python.framework import graph_util\nimport configparser\nfrom data_utils import DataUtils\nimport multiprocessing\nfrom multiprocessing import Process, Manager\nfrom sklearn.model_selection import StratifiedKFold\nfrom data_model import DataModel\nimport re\n\nparams_cfg_file = './params.cfg'\nconfig = configparser.RawConfigParser()\nconfig.read(params_cfg_file)\nbatch_size_list = [int(v) for v in config.get('hyperparameters', 'batch_size_list').split(\",\")]\nlearning_rate_list = [float(v) for v in config.get('hyperparameters', 'learning_rate_list').split(\",\")]\ndropout_list = [float(v) for v in config.get('hyperparameters', 'dropout_list').split(\",\")]\nlayer_num_list = [int(v) for v in config.get('hyperparameters', 'layer_num_list').split(\",\")]\nhidden_num_list = [int(v) for v in config.get('hyperparameters', 'hidden_num_list').split(\",\")]\n\nsave_file_num = config.getint('hyperparameters', 'save_file_num')\nword_vec_size = config.getint('hyperparameters', 'word_vec_size')\nsentence_len = config.getint('hyperparameters', 'sentence_len')\niter_num = config.getint('hyperparameters', 'iter_num')\n\nfold_num = config.getint(\"plugin\", 'fold_num')\nbase_acc = config.getfloat('plugin', 'base_acc')\nbase_f1_score = config.getfloat('plugin', 'base_f1_score')\nsave_pb_mode = config.getboolean('plugin', 'save_pb_mode')\nprint_bad_case_mode = config.getboolean('plugin', 'print_bad_case_mode')\n\nmodel_src = config.get('data', 'model_filepath')\nidx2vec_path = config.get('data', 'idx2vec_filepath')\nword2idx_path = config.get('data', 'word2idx_filepath')\nidx2word_path = config.get('data', 'idx2word_filepath')\nlabel2idx_src = config.get('data', 'label2idx_src')\n\n# 模型可视化\n# writer = tf.summary.FileWriter(\"./model_graph/\" + visual_model_name)\n# writer.add_graph(sess.graph)\n# merged_summary = tf.summary.merge_all()\n# lstm_model.enable_visual(merged_summary)\n\n\n# 模型加载和保存\nfold_model_src_list = ['./save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.4layer_num: 2hidden_num: 500',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.4layer_num: 2hidden_num: 500',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.5layer_num: 2hidden_num: 1000',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.5layer_num: 3hidden_num: 500',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.6layer_num: 2hidden_num: 1000',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.6layer_num: 2hidden_num: 2000',\n './save_model/test/batch_size: 20learning_rate: 0.001dropout: 0.6layer_num: 3hidden_num: 500',\n ]\nlayer_num_patten = re.compile('(?<=layer_num: )[0-9]+')\nhidden_num_patten = re.compile('(?<=hidden_num: )[0-9]+')\n\nfor idx, fold_model_src in enumerate(fold_model_src_list):\n tf.reset_default_graph()\n layer_num = int(re.findall(layer_num_patten, fold_model_src)[0])\n hidden_num = int(int(re.findall(hidden_num_patten, fold_model_src)[0]) / 10)\n\n sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))\n lstm_model = model.Model(sentence_len=sentence_len, learning_rate=0.001, word_vec_size=word_vec_size,\n dropout=1, layer_num=layer_num, hidden_num=hidden_num)\n lstm_model.build(sess)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=int(save_file_num))\n ckpt = 
tf.train.get_checkpoint_state(fold_model_src)\n\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n if save_pb_mode is True:\n constant_graph = tf.graph_util.convert_variables_to_constants(\n sess,\n sess.graph_def,\n ['predict_result/output_result'],\n variable_names_whitelist=None,\n variable_names_blacklist=None\n )\n\n with tf.gfile.FastGFile('./pb_model/' + str(idx) + '.pb', mode='wb') as f:\n f.write(constant_graph.SerializeToString())\n", "sub_path": "generator_pb.py", "file_name": "generator_pb.py", "file_ext": "py", "file_size_in_byte": 4206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "configparser.RawConfigParser", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 59, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 63, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 64, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 67, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.graph_util.convert_variables_to_constants", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.graph_util", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.gfile.FastGFile", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 87, "usage_type": "attribute"}]}
+{"seq_id": "151473887", "text": "# uncompyle6 version 3.2.3\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 29 2018, 20:59:26) \n# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]\n# Embedded file name: /Users/ivanjut333/PycharmProjects/HipIES/pipeline/peakfinding.py\n# Compiled at: 2015-07-21 18:12:19\nfrom pylab import *\nfrom scipy import signal\nfrom scipy.ndimage import filters\nfrom hipies import debug\nimport pyqtgraph as pg\nfrom PySide import QtCore\nimport inspect\nmaxfiltercoef = 5\ncwtrange = np.arange(1, 100)\nmaxfiltercoef = 5\ncwtrange = np.arange(3, 100)\ngaussiancentersigma = 2\ngaussianwidthsigma = 5\n\n@debug.timeit\ndef findpeaks(x, y):\n cwtdata = filters.gaussian_filter1d(filters.gaussian_filter1d(signal.cwt(y, signal.ricker, cwtrange), gaussiancentersigma, axis=1), gaussianwidthsigma, axis=0)\n maxima = cwtdata == filters.maximum_filter(cwtdata, 5)\n maximaloc = np.where(maxima == 1)\n x = np.array(x)\n y = np.array(y)\n return list(np.array(np.vstack([x[maximaloc[1]], y[maximaloc[1]], maximaloc])))\n\n\nclass peaktooltip:\n\n def __init__(self, x, y, widget):\n self.q, self.I, self.width, self.index = findpeaks(x, y)\n self.scatterPoints = pg.PlotDataItem(self.q, self.I, size=10, pen=pg.mkPen(None), symbolPen=None, symbolBrush=pg.mkBrush(255, 255, 255, 120), symbol='o')\n self.display_text = pg.TextItem(text='', color=(176, 23, 31), anchor=(0, 1))\n self.display_text.hide()\n widget.addItem(self.scatterPoints)\n widget.addItem(self.display_text)\n self.scatterPoints.scene().sigMouseMoved.connect(self.onMove)\n return\n\n def onMove(self, pixelpos):\n itempos = self.scatterPoints.mapFromScene(pixelpos)\n itemx = itempos.x()\n itemy = itempos.y()\n pixeldelta = 7\n delta = self.scatterPoints.mapFromScene(QtCore.QPointF(pixeldelta + pixelpos.x(), pixeldelta + pixelpos.y()))\n deltax = delta.x() - itemx\n deltay = -(delta.y() - itemy)\n p1 = [ point for point in zip(self.q, self.I) if itemx - deltax < point[0] and point[0] < itemx + deltax and itemy - deltay < point[1] and point[1] < itemy + deltay\n ]\n if len(p1) != 0:\n self.display_text.setText('q=%f\\nI=%f' % (p1[0][0], p1[0][1]))\n self.display_text.setPos(*p1[0])\n self.display_text.show()\n else:\n self.display_text.hide()\n# okay decompiling peakfinding.pyc\n", "sub_path": "pipeline/peakfinding.py", "file_name": "peakfinding.py", "file_ext": "py", "file_size_in_byte": 2438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "scipy.ndimage.filters.gaussian_filter1d", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 23, "usage_type": "name"}, {"api_name": "scipy.signal.cwt", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 23, "usage_type": "name"}, {"api_name": "scipy.signal.ricker", "line_number": 23, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.filters.maximum_filter", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 24, "usage_type": "name"}, {"api_name": "hipies.debug.timeit", "line_number": 21, "usage_type": "attribute"}, {"api_name": "hipies.debug", "line_number": 21, "usage_type": "name"}, {"api_name": "pyqtgraph.PlotDataItem", "line_number": 35, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 35, "usage_type": "call"}, {"api_name": "pyqtgraph.mkBrush", "line_number": 35, "usage_type": "call"}, {"api_name": 
"pyqtgraph.TextItem", "line_number": 36, "usage_type": "call"}, {"api_name": "PySide.QtCore.QPointF", "line_number": 48, "usage_type": "call"}, {"api_name": "PySide.QtCore", "line_number": 48, "usage_type": "name"}]}
+{"seq_id": "101339720", "text": "import sys\nimport angr\n\nproject = angr.Project(sys.argv[1], auto_load_libs=False)\n\nfree_map = {}\n\n\nclass FreeHandler(angr.SimProcedure):\n def run(self, ptr):\n caller_address = hex(self.state.addr)\n free_ptr = hex(self.state.solver.eval(self.state.regs.rdi))\n print(\"Free called on: %s\" % (free_ptr))\n if not free_ptr in free_map:\n free_map[free_ptr] = caller_address\n else:\n print(\n \"Potential Double Free: %s is trying to free %s, which has already been freed by %s\"\n % (caller_address, free_ptr, free_map[free_ptr])\n )\n\n\ndef validate_read(state):\n region = state.inspect.mem_read_address\n if region in free_map:\n free_call = free_map.get(region)\n print(\n \"Potential UAF: %s read from memory freed by %s\"\n % (region, free_call)\n )\n\n\ndef validate_write(state):\n region = state.inspect.mem_write_address\n if region in free_map:\n free_call = free_map.get(region)\n print(\n \"Potential UAF: %s wrote to memory freed by %s\"\n % (region, free_call)\n )\n\n\nproject.hook_symbol(\"free\", FreeHandler())\n\nsimgr = project.factory.simulation_manager()\ninspector = project.factory.entry_state()\n\ninspector.inspect.b(\"mem_write\", when=angr.BP_AFTER, action=validate_write)\ninspector.inspect.b(\"mem_read\", when=angr.BP_AFTER, action=validate_read)\n\nsimgr.run()\n", "sub_path": "uafinator.py", "file_name": "uafinator.py", "file_ext": "py", "file_size_in_byte": 1440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "angr.Project", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 4, "usage_type": "attribute"}, {"api_name": "angr.SimProcedure", "line_number": 9, "usage_type": "attribute"}, {"api_name": "angr.BP_AFTER", "line_number": 48, "usage_type": "attribute"}, {"api_name": "angr.BP_AFTER", "line_number": 49, "usage_type": "attribute"}]}
+{"seq_id": "142965602", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass MADE(nn.Module):\n \"\"\"\n Masked autoencoder for distribution estimation (MADE) as introduced in\n `MADE: Masked Autoencoder for Distribution Estimation `_\n (Germain et al., 2015). In consists of a series of masked linear layers and a given\n non-linearity between them.\n \"\"\"\n\n def __init__(self, *dims, activation=nn.LeakyReLU(), seed=0, permute=False):\n \"\"\"\n Initializes a new MADE model as a sequence of masked linear layers.\n\n Parameters\n ----------\n dims: varargs of int\n Dimensions of input (first), output (last) and hidden layers. At least one hidden layer\n must be defined, i.e. at least 3 dimensions must be given. The output dimension must be\n equal to the input dimension or a multiple of it.\n activation: torch.nn.Module, default: torch.nn.LeakyReLU()\n An activation function to be used after linear layers (except for the output layer).\n This module is shared for all hidden layers.\n seed: int, default: None\n A seed to use for initializing the random number generator for constructing random\n masks for the hidden layers. If set to `None`, deterministic initialization is used.\n permute: bool, default: False\n Whether to arbitrarily permute the input (permutation is applied deterministically).\n \"\"\"\n super().__init__()\n\n if len(dims) < 3:\n raise ValueError(\"MADE model must have at least one hidden layer\")\n if dims[-1] % dims[0] != 0:\n raise ValueError(\"Output dimension must be multiple of the input dimension\")\n\n self.dims = dims\n\n if seed is None:\n m_layers = _generate_sequential(dims)\n else:\n generator = torch.Generator().manual_seed(seed)\n m_layers = _generate_random_numbers(dims, generator, permute)\n\n layers = []\n for i, (in_dim, out_dim) in enumerate(zip(dims, dims[1:])):\n if i > 0:\n layers.append(activation)\n\n hidden = i < len(dims) - 2\n mask = _generate_mask(m_layers[i], m_layers[i+1], hidden=hidden)\n layers.append(_MaskedLinear(in_dim, out_dim, mask=mask))\n\n self.mlp = nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n Computes the outputs of the MADE model.\n\n Parameters\n ----------\n x: torch.Tensor [..., D]\n The input (input dimension D).\n\n Returns\n -------\n torch.Tensor [..., E]\n The output (output dimension E).\n \"\"\"\n return self.mlp(x)\n\n\nclass _MaskedLinear(nn.Linear):\n\n def __init__(self, in_features, out_features, mask, bias=True):\n super().__init__(in_features, out_features, bias)\n self.register_buffer('mask', mask)\n\n def forward(self, x):\n return F.linear(x, self.weight * self.mask, self.bias)\n\n def __repr__(self):\n return f'MaskedLinear(in_features={self.in_features}, ' + \\\n f'out_features={self.out_features}, bias={self.bias is not None})'\n\n\ndef _generate_sequential(dims):\n in_dim = dims[0]\n\n degrees = [torch.arange(in_dim)]\n for dim in dims[1:]:\n degrees += [torch.arange(dim) % (in_dim - 1)]\n degrees += [torch.arange(in_dim) % in_dim - 1]\n\n return degrees\n\n\ndef _generate_random_numbers(dims, generator, permute):\n in_dim = dims[0]\n\n samples = []\n # Avoid unconnected units by sampling at least the minimum number of connected neurons in the\n # previous layer\n min_val = 0\n\n # We assign values between 0 and D-2 such that we can simply arange/permute the indices for the\n # input layer\n for i, dim in enumerate(dims[:-1]):\n if i == 0:\n m_vals = torch.randperm(dim, generator=generator) if permute else torch.arange(dim)\n else:\n m_vals = 
torch.randint(min_val, in_dim-1, size=(dim,), generator=generator)\n min_val = m_vals.min().item()\n samples.append(m_vals)\n\n if dims[-1] > dims[0]:\n samples.append(samples[0].repeat(dims[-1] // dims[0]))\n else:\n samples.append(samples[0])\n\n return samples\n\n\ndef _generate_mask(m_prev, m_next, hidden=True):\n if hidden:\n mask = m_next[None, :] >= m_prev[:, None]\n else: # for output layer\n mask = m_next[None, :] > m_prev[:, None]\n return mask.float().t()\n", "sub_path": "pyblaze/nn/modules/made.py", "file_name": "made.py", "file_ext": "py", "file_size_in_byte": 4498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.Generator", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.functional.linear", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 114, "usage_type": "call"}]}
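+A short usage sketch for the MADE module above; the dimensions and batch are illustrative. With the default masks the highest-degree input conditions the model but influences no output, which gives a cheap autoregressive sanity check:
+import torch
+
+made = MADE(4, 32, 32, 4)     # input dim 4, two hidden layers, output dim 4
+x = torch.randn(8, 4)
+x2 = x.clone()
+x2[:, -1] += 1.0              # perturb only the last input variable
+assert torch.allclose(made(x), made(x2))  # the masks block any influence of input 3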
+{"seq_id": "146026813", "text": "'''\n # @ Author: Zion Deng\n # @ Description: Try to solve the project with MPC, pyomo \n Model description: \n state: x, y, theta, xdot, ydot, thetadot \n ctrl state: F, delta \n m=x(7);\n f(1,1) = x(4); % f\n f(2,1) = x(5); % dy\n f(3,1) = x(6); % d_theta\n f(4,1) = 0.001*(-(rou*Ar*(x(4)^2)/(2*m))-u(2)*(rou*Ag*(x(4)^2)/(2*m))); % df\n f(5,1) = -g-0.001*((rou*Ar*(x(5)^2)/(2*m))-u(2)*(rou*Ag*(x(5)^2)/(2*m))); % ddy\n f(6,1) = 0;\n f(7,1) = 0; % dm\n '''\n\nimport numpy as np \nimport pyomo.environ as pyo \nimport matplotlib.pyplot as plt \n\ndef MPC_reen():\n \"\"\" \n solve with pyomo\n return: feas, xOpt, uOpt \n \"\"\" \n # Constants\n M = 326956\n ROU = 1.1\n A = 100\n g = 10\n GAMMA = 0.1\n L = 70\n J = 1/2*M*L**2 \n K = GAMMA*ROU*A*g / (2*M)\n ST = 46000\n Ar=100\n Ag=36\n\n NX = 7 # number of states\n NU = 1 # number of inputs \n DT = 1 # time interval \n N = 70 # number of total intervals \n INITIAL_STATE = [201364, 102181, -1, 852, -767, 0, 354696] \n DESIRED_STATE = [260000, 20000, -1, 200, -560, 0, 326956.0] \n P = [1e-5, 1e-4, 1, 1e-2, 1e-2, 1, 1e-5] # P matrix for terminal state cost \n FMAX = 1.1 # the max force that engine can provide \n DELTAMAX = 0.1\n m = pyo.ConcreteModel() # pyomo model\n m.tidx = pyo.Set( initialize= range(0,N+1)) # time index \n m.xidx = pyo.Set( initialize= range(0, NX)) # state index \n m.uidx = pyo.Set( initialize= range(0, NU)) # input index \n\n m.x = pyo.Var(m.xidx, m.tidx) # model x[i,t]\n m.u = pyo.Var(m.uidx, m.tidx) # model u[i,t]\n\n # cost function \n m.cost = pyo.Objective(\n expr = sum((P[i] * (m.x[i,t] - DESIRED_STATE[i]))**2 for i in m.xidx for t in range(N-5,N)), \n sense = pyo.minimize \n ) \n # initial state constraints \n m.init_cons = pyo.Constraint(\n m.xidx, \n rule = lambda m, i: m.x[i,0] == INITIAL_STATE[i]\n ) \n # y > 200\n m.height_cons = pyo.Constraint(\n m.tidx,\n rule = lambda m, t: -m.x[1,t] <= 0\n if t < N else pyo.Constraint.Skip\n )\n # 0 {li_item.text}\\n\"\n \n\n # Display de estudios\n estudiostop = \"\"\n estudiostop1 = soup.find_all(\"div\", {\"class\": \"estudios\"})\n for x in estudiostop1:\n estudiostop += f\"=> {x.text}\\n\"\n\n # Leftbar items\n leftbar = \"\"\n leftbar1= soup.findAll(\"div\", {\"class\":\"leftbar\"})\n for left in leftbar1:\n for left_li in left.find_all(\"li\"):\n leftbar += f\"=> {left_li.text}\\n\"\n\n #get and display all social media with its links \n socialmedia= \"\"\n socialmedia1 = soup.find(\"div\", {\"class\": \"social pull-right\"})\n for social in socialmedia1.find_all('a'):\n socialmedia += \"=>\" + social['href']+ '\\n'\n\n\n #Count all \n\n ContadorA = soup.findAll('a')\n\n #### Prints \n print(\"Navegación: \\n\", hrefnav)\n print(\"Items del menú: \\n\", menutop)\n print(\"Items de estudios: \\n\", estudiostop)\n print(\"Leftbar items: \\n\", leftbar)\n print(\"Social media links: \\n\" , socialmedia)\n print(\"Contador de : \" , str(len(ContadorA)))\n\nestuds()", "sub_path": "Estudios.py", "file_name": "Estudios.py", "file_ext": "py", "file_size_in_byte": 1575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "408964072", "text": "from datetime import time\nfrom typing import List\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\nfrom queries.project import Project\nfrom queries.vacancy import Vacancy\n\n\ndef create_work_message(vacancies: List[Vacancy]) -> str:\n message = ''\n for vacancy in vacancies:\n message += f'*{vacancy.date}*\\n' \\\n f'*{vacancy.company}*: [{vacancy.title}]({vacancy.link})\\n\\n'\n return message\n\n\ndef create_freelance_message(projects: List[Project]) -> str:\n message = ''\n for project in projects:\n message += f'*{project.date}*\\n' \\\n f'[{project.title}]({project.link})\\n' \\\n f'*{project.offers_count} Предложений*\\n\\n'\n return message\n\n\ndef start_parser(parser):\n data = parser.parse()\n return data\n\n\ndef get_queried_data(search_query, parsers):\n parsers = [parser(search_query) for parser in parsers]\n\n with ThreadPoolExecutor(max_workers=4) as executor:\n data = executor.map(start_parser, parsers)\n\n vacancies = []\n for nested in data:\n for vacancy in nested:\n vacancies.append(vacancy)\n vacancies.sort(key=lambda x: x.date, reverse=True)\n\n return vacancies\n\n\nif __name__ == '__main__':\n pass\n # print(get_vacancies('Trainee'))\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "queries.vacancy.Vacancy", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "queries.project.Project", "line_number": 17, "usage_type": "name"}, {"api_name": "concurrent.futures.thread.ThreadPoolExecutor", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "249489068", "text": "import os, ConfigParser, itertools\n\nclass ConfigFactory( object ):\n\n def __init__(self, conf, **kwargs):\n self.cpath = conf\n self.config = self._readConf( conf ) \n self.iters = self._getIterators() \n self.keys = self.iters[0]\n self.values = self.iters[1]\n\n ###\n # Methods for external use\n ###\n\n def get( self, sec, opt, dft=None ):\n try:\n option = self.config.get( sec, opt )\n try: \n return eval( option )\n except TypeError:\n return option\n except ConfigParser.NoOptionError:\n return dft\n\n def pop( self, sec, opt, dft=None ):\n value = self.get( sec, opt, dft )\n self.config.remove_option( sec, opt ) \n return value\n\n ###\n # Methods for internal use\n ###\n\n def _makeConf( self, keys, values ):\n new_config = ConfigParser.ConfigParser()\n new_config.optionxform = str\n fmap = { k:v for k,v in zip(keys, values) }\n for sec in self.config.sections():\n if sec == 'Iterators': continue\n if sec == 'Template': continue\n new_sec = sec\n if 'Name' in [ opt for opt,_ in self.config.items( sec ) ]:\n new_sec += self.config.get( sec, 'Name' ).format( **fmap )\n if not new_config.has_section( new_sec ):\n new_config.add_section( new_sec )\n for opt,val in self.config.items( sec ):\n if opt == 'Name': continue\n new_config.set( new_sec, opt.format( **fmap ), val.format( **fmap ) ) \n return new_config \n\n def _readConf( self, conf ):\n config = ConfigParser.ConfigParser()\n config.optionxform = str\n config.read( conf )\n self.template = config.get( 'Loop', 'Template' ) \n config.read( [self.template, conf] )\n return config\n\n def _makeConfigDir( self ):\n self.ConfigDir = self.config.get( 'Loop', 'ConfigDir' ) \n try:\n os.mkdir( self.ConfigDir )\n except OSError:\n pass\n\n def _getIterators( self ):\n self.nested = self.pop( 'Iterators', 'Nested', False )\n names, vals = self[ 'Iterators' ]\n if self.nested:\n vals = [ x for x in itertools.product( *vals ) ]\n else:\n vals = zip( *vals )\n return names, vals\n \n ###\n # Special Python methods\n ###\n\n def __getitem__(self, key ):\n opts, vals = zip( *self.config.items( key ) )\n return opts, [ eval(v) for v in vals ]\n\n def __iter__( self ):\n self.ind = -1 \n self._makeConfigDir()\n self.tag = self.config.get( 'Loop', 'Tag', 'loop_conf' )\n return self\n\n def next( self ):\n self.ind += 1\n if self.ind >= len( self.values ):\n raise StopIteration\n conf = self._makeConf( self.keys, self.values[self.ind] ) \n conf_path = os.path.join( self.ConfigDir, '%s.%d.conf' %(self.tag, self.ind) )\n with open( conf_path, 'wb' ) as f:\n conf.write( f )\n return conf\n", "sub_path": "Tools/ConfigFactory.py", "file_name": "ConfigFactory.py", "file_ext": "py", "file_size_in_byte": 3134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ConfigParser.NoOptionError", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 36, "usage_type": "call"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 53, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}]}
+{"seq_id": "347454840", "text": "# Author : Ttatanepvp123\n# Github : https://github.com/ttatanepvp123\n# License : GPL-3 ( https://www.gnu.org/licenses/gpl-3.0.en.html )\nimport _thread\nimport tempfile\nimport os\nimport time\nimport requests\nimport utils\nimport random\n\nclass indexer():\n # EVENTS\n def onReady(self):\n pass\n def onRequests(self, r : requests.Response):\n pass\n def onError(self, e):\n pass\n\n def addLinks(self, links):\n while self.linksFileIsOpen:\n time.sleep(0.005)\n self.linksFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\", \"a+\") as fp:\n for currentLink in links:\n fp.write(f\"{currentLink}\\n\")\n self.linksFileIsOpen = False\n\n def getLink(self):\n while self.linksFileIsOpen:\n time.sleep(0.005)\n self.linksFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\", \"r\") as fp:#get first link\n link = fp.readline()\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.tmp\", \"w\") as fp:#delete first link (without load file)\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\", \"r\") as fpp:\n for currentLine in fpp:\n if currentLine != link:\n fp.write(currentLine)\n os.remove(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\")\n os.rename(f\"{tempfile.gettempdir()}/{self.numberInstance}/links.tmp\", f\"{tempfile.gettempdir()}/{self.numberInstance}/links.txt\")\n self.linksFileIsOpen = False\n self.linksNumber -= 1\n return link.replace(\"\\n\",\"\")\n \n def addLinkChecked(self, link):\n while self.linksCheckedFileIsOpen:\n time.sleep(0.005)\n self.linksCheckedFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/checked.txt\", \"a+\") as fp:\n fp.write(f\"{link}\\n\")\n self.linksCheckedFileIsOpen = False\n\n def isChecked(self, link):\n returner = False\n while self.linksCheckedFileIsOpen:\n time.sleep(0.005)\n self.linksCheckedFileIsOpen = True\n with open(f\"{tempfile.gettempdir()}/{self.numberInstance}/checked.txt\", \"r\") as fp:\n for currentLink in fp:\n if currentLink.replace(\"\\n\",\"\") == link:\n returner = True\n break\n self.linksCheckedFileIsOpen = False\n return returner\n\n def worker(self, link):\n try:\n if self.isChecked(link):\n self.threadStarted -= 1\n return\n else:\n self.addLinkChecked(link)\n r = requests.get(link, headers=self.headers, timeout=self.timeout)\n self.onRequests(r)\n links = self.getAllLinks(r)\n self.addLinks(links)\n self.linksNumber += len(links)\n except Exception as e:\n self.onError(e)\n self.threadStarted -= 1\n\n def __init__(self, url, threadNumber=5, headers={\"User-Agent\":\"CookieBot/0.2 (+https://slackercompany.ml/CookieBot/)\"}, timeout=10):\n self.getAllLinks = utils.getAllLinks\n self.linksFileIsOpen = False\n self.linksCheckedFileIsOpen = False\n self.threadStarted = 0\n self.headers = headers\n self.timeout = timeout\n self.numberInstance = random.randint(0,99999999)\n os.mkdir(f\"{tempfile.gettempdir()}/{self.numberInstance}/\")\n self.addLinkChecked(\"debug\")\n self.addLinks([url])\n self.linksNumber = 1\n fakeDoWhile = False\n self.onReady()\n while True:\n time.sleep(0.0025)\n if self.threadStarted < threadNumber and self.linksNumber > 0:\n fakeDoWhile = True\n self.threadStarted += 1\n _thread.start_new_thread(self.worker, (self.getLink(),))\n elif fakeDoWhile and self.threadStarted == 0 and self.linksNumber == 0:\n break", "sub_path": "indexer.py", "file_name": "indexer.py", "file_ext": "py", "file_size_in_byte": 4023, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.Response", "line_number": 16, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 34, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 36, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 41, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 42, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 60, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.getAllLinks", "line_number": 85, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 91, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 92, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 92, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 103, "usage_type": "call"}]}
+{"seq_id": "552854359", "text": "import matplotlib.pyplot as plt \r\nimport numpy as np\r\n\r\nx=np.linspace(-3,3,50)\r\ny1=2*x+1\r\ny2=x**2\r\n\r\nplt.figure()\r\nplt.plot(x,y1)#第一张图\r\n\r\nplt.figure(figsize=(5,5))\r\nplt.plot(x,y2)#第二张图\r\nplt.plot(x,y1,color='red',linewidth=1.0,linestyle='--')#两条线画一个figure里面\r\nplt.show()", "sub_path": "Matplotlib_ex/plot_ex_2.py", "file_name": "plot_ex_2.py", "file_ext": "py", "file_size_in_byte": 300, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.linspace", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "630181633", "text": "from datetime import datetime\nfrom flask_restful import fields\nfrom blueprints import db\n\nclass Places(db.Model):\n __tablename__ = \"places\"\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(255), nullable=False)\n description = db.Column(db.Text, nullable=False)\n place_type = db.Column(db.String(255), nullable=False)\n primary_image = db.Column(db.Text, nullable=False)\n street = db.Column(db.String(255), nullable=False)\n city = db.Column(db.String(255), nullable=False)\n country = db.Column(db.String(255), nullable=False)\n longitude = db.Column(db.Float, nullable=False)\n latitude = db.Column(db.Float, nullable=False)\n deleted = db.Column(db.Boolean, default=False)\n created_at = db.Column(db.DateTime, default=datetime.now())\n updated_at = db.Column(db.DateTime, onupdate=datetime.now())\n\n response_fields = {\n \"id\": fields.Integer,\n \"name\": fields.String,\n \"description\": fields.String,\n \"place_type\": fields.String,\n \"primary_image\": fields.String,\n \"street\": fields.String,\n \"city\": fields.String,\n \"country\": fields.String,\n \"longitude\": fields.Float,\n \"latitude\": fields.Float,\n \"deleted\": fields.Boolean,\n \"created_at\": fields.DateTime,\n \"updated_at\": fields.DateTime\n }\n\n def __init__(\n self,\n name,\n description,\n place_type,\n primary_image,\n street,\n city,\n country,\n longitude,\n latitude\n ):\n self.name = name\n self.description = description\n self.place_type = place_type\n self.primary_image = primary_image\n self.street = street\n self.city = city\n self.country = country\n self.longitude = longitude\n self.latitude = latitude\n\n def __repr__(self):\n return \"\" % self.id\n", "sub_path": "blueprints/places/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "blueprints.db.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "blueprints.db", "line_number": 5, "usage_type": "name"}, {"api_name": "blueprints.db.Column", "line_number": 7, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 7, "usage_type": "name"}, {"api_name": "blueprints.db.Integer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 8, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 8, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 9, "usage_type": "name"}, {"api_name": "blueprints.db.Text", "line_number": 9, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 10, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 10, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 11, "usage_type": "name"}, {"api_name": "blueprints.db.Text", "line_number": 11, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 12, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 12, "usage_type": "call"}, {"api_name": "blueprints.db.Column", 
"line_number": 13, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 13, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 13, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 14, "usage_type": "name"}, {"api_name": "blueprints.db.String", "line_number": 14, "usage_type": "call"}, {"api_name": "blueprints.db.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 15, "usage_type": "name"}, {"api_name": "blueprints.db.Float", "line_number": 15, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 16, "usage_type": "name"}, {"api_name": "blueprints.db.Float", "line_number": 16, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 17, "usage_type": "name"}, {"api_name": "blueprints.db.Boolean", "line_number": 17, "usage_type": "attribute"}, {"api_name": "blueprints.db.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 18, "usage_type": "name"}, {"api_name": "blueprints.db.DateTime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "blueprints.db.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "blueprints.db", "line_number": 19, "usage_type": "name"}, {"api_name": "blueprints.db.DateTime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_restful.fields.Integer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 28, "usage_type": "name"}, {"api_name": "flask_restful.fields.String", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "flask_restful.fields.Float", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 30, "usage_type": "name"}, {"api_name": "flask_restful.fields.Float", "line_number": 31, "usage_type": "attribute"}, {"api_name": 
"flask_restful.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_restful.fields.Boolean", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 32, "usage_type": "name"}, {"api_name": "flask_restful.fields.DateTime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 33, "usage_type": "name"}, {"api_name": "flask_restful.fields.DateTime", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask_restful.fields", "line_number": 34, "usage_type": "name"}]}
+{"seq_id": "400495010", "text": "import nltk\nimport string\nimport random\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity,euclidean_distances\nfrom nltk.stem import PorterStemmer\nimport operator\nfrom collections import Counter\nimport re\nimport math\nimport re\nimport gensim\nfrom gensim.parsing.preprocessing import remove_stopwords\nimport pandas as pd\nfrom gensim import corpora\nfrom sklearn.metrics.pairwise import cosine_similarity,euclidean_distances,manhattan_distances\nimport numpy as np\nfrom nltk.stem import WordNetLemmatizer\n \nwnl = WordNetLemmatizer()\n\n\ndf=pd.read_csv('chatbot/Faqs_pdeu.csv')\ndf.head()\n\ndef get_euclid(a,b):\n return math.sqrt(sum((a[k] - b[k])**2 for k in set(a.keys()).intersection(set(b.keys()))))\ndef get_man(a,b):\n return (sum((a[k] - b[k])**2 for k in set(a.keys()).intersection(set(b.keys()))))\ndef get_cosine(vec1, vec2):\n intersection = set(vec1.keys()) & set(vec2.keys())\n numerator = sum([vec1[x] * vec2[x] for x in intersection])\n\n sum1 = sum([vec1[x] ** 2 for x in list(vec1.keys())])\n sum2 = sum([vec2[x] ** 2 for x in list(vec2.keys())])\n denominator = math.sqrt(sum1) * math.sqrt(sum2)\n\n if not denominator:\n return 0.0\n else:\n return float(numerator) / denominator\n\ndef text_to_vector(text):\n words = WORD.findall(text)\n return Counter(words)\n\n\nWORD = re.compile(r\"\\w+\")\nf=open('chatbot/pdeu.txt','r',encoding='utf-8',errors='ignore')\nraw=f.read()\nraw=raw.lower()\n#print(raw)\nsent_tokens=nltk.sent_tokenize(raw)\n# print(sent_tokens)\nsent_tokens=[x.replace('\\n','') for x in sent_tokens]\n#print('------sent_tokens-----')\n#print(sent_tokens)\n\nword_tokens=nltk.word_tokenize(raw)\nlemmer=nltk.stem.WordNetLemmatizer()\n#print(sent_tokens)\n#print(len(sent_tokens))\n\ndef lemmatize(tokens):\n return [lemmer.lemmatize(token) for token in tokens]\nremove_punct_dict=dict((ord(punct),None) for punct in string.punctuation)\ndef normalize(text):\n return lemmatize(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))\n\n\ndef greet(sent):\n greet_resp=[\"hello welcome!!\",\"hi how are you?\",\"Pleasure to hear from you!!!\",\"Hello sir\",\"nice to meet you sir!!!\",\"What can I do for you?\"]\n greet_inp=['hii','heyaa','hello','hey there',\"hi\",\"hey\",\"hello\",\"howdy\",\"how are you?\"]\n if sent in [\"good morning\",\"good afternoon\",\"good evening\"]:\n return f\"hello , {sent}\"\n if sent==\"good night\":\n return \"good night\"\n\n if(sent[-1]=='?'):\n sent=sent[:-1]\n ps = PorterStemmer()\n arr=sent.split(' ')\n arr=[ps.stem(i) for i in arr]\n print('\\n\\n----------------------------------',arr,'\\n\\n')\n if('see' and 'you') in arr:\n return 'Talk to you Later'\n elif 'goodby' in arr or 'bye' in arr:\n return 'Good Bye :)'\n elif 'accredit' in arr and 'colleg' in arr:\n return 'Yes'\n elif 'instal' in arr and 'fee' in arr and 'pay' in arr:\n return 'Yes You can pay fees in two installmensts'\n elif 'hour' in arr and ('work' in arr or 'oper' in arr):\n return 'We are open 9:00am-4:00pm Monday-friday!'\n elif ('field' in arr or 'branch' in arr) and 'different' in arr and 'colleg' in arr:\n return '\"Petroleum Technology-120,Mechanical Engineering-120,Electrical Engineering-120,Civil Engineering-120,Chemical Engineering-120,Computer Science-60,Information and Communication Technology-60\".'\n elif ('cse' in arr or 'mechan' in arr or 'chemica' in arr or 'electr' in arr or 'comput' in arr or 'scienc' in arr or 'inform' or 'commun' in arr or 
'technolg' in arr or 'petroleum' in arr) and 'subject' in arr:\n return 'You can check all this course related information from our website !'\n elif 'payment' in arr and 'fee' in arr and 'avail' in arr:\n return 'cheque,debit card,netbanking,credit card are acceptable. NEFT is preferable'\n elif 'is' in arr and 'transportation' in arr and 'avail' in arr:\n return 'Yes , bus service is available.'\n elif 'hostel' in arr and 'facil' in arr and 'avail' in arr:\n return 'Yes! we provide telephone , internet , AC , first-aid , reading , dining , security all this facility in hostel'\n elif 'transportation' in arr and 'fee' in arr:\n return 'transportaion fees of our college is 10500 per semester'\n elif 'semest'in arr and 'fee' in arr:\n return 'fees of our college is 110000 per semester!'\n elif 'chairman' in arr and 'who' in arr and 'colleg' in arr:\n return 'Mukesh Ambani is chairman of our college'\n elif 'is' in arr and 'under' in arr and 'gtu' in arr:\n return 'No, our college doesnt come under GTU.'\n elif 'scholarship' in arr and 'criteria' in arr:\n return 'you can check out at :: https://www.pdpu.ac.in/downloads/Financial%20Assistance%202019.pdf'\n\n for word in sent.split():\n if word.lower() in greet_inp:\n return random.choice(greet_resp)\n return None\n\n#Searching in file\n# Response for searching in file using TF-IDF\ndef resp(user_inp):\n ans,ind,hue=[],[],3\n tfidvec=TfidfVectorizer(tokenizer=normalize,stop_words='english')\n tfid=tfidvec.fit_transform(sent_tokens)\n\n vals=cosine_similarity(tfid[-1],tfid)\n d={}\n for i in range(0,len(vals[0])):\n \td[i]=vals[0][i]\n sorted_d = dict( sorted(d.items(), key=operator.itemgetter(1),reverse=True))\n for (key,val) in sorted_d.items():\n \tif(hue>0 and val>0):\n \t\tind.append(key)\n \telse:\n \t\tbreak\n \thue-=1\n flat=vals.flatten()\n \n flat=sorted(flat,reverse=True)\n req_tfid=flat[0]\n if(req_tfid==0):\n ans=ans+\"I am sorry! 
I don't understand you\" \n else:\n for index in ind: \n ans.append(sent_tokens[index])\n ans1=''\n for statements in ans:\n ans1=ans1+str(statements)\n ans1+='\\n'\n return ans1\n\ndef clean_sent(sent,stopwords=False):\n sent=sent.lower().strip()\n sent=re.sub(r'[^a-z0-9\\s]','',sent)\n if stopwords:\n sent=remove_stopwords(sent)\n return sent \n\ndef get_clean_sent(df,stopwords=False):\n sents=df[['Questions']]\n cleaned_sent=[]\n for index,row in df.iterrows():\n cleaned=clean_sent(row['Questions'],stopwords)\n cleaned=cleaned.lower()\n cleaned_sent.append(\" \".join([wnl.lemmatize(i) for i in cleaned.split()]))\n return cleaned_sent\n\n#Glove model\ndef getwordvec(word,model):\n samp=model['computer']\n sample_len=len(samp)\n vec=[0]*sample_len\n try:\n vec=model[word]\n except:\n vec=[0]*sample_len\n return vec\n\ndef getphrase(phrase,embeddingmodel):\n samp=getwordvec('computer',embeddingmodel)\n vec=np.array([0]*len(samp))\n den=0\n for word in phrase.split():\n den+=1\n vec=vec+np.array(getwordvec(word,embeddingmodel))\n return vec.reshape(1,-1)\n\ndef glove(question,cleaned_sent,param):\n google_model=gensim.models.KeyedVectors.load('chatbot/w2vecmodel.mod')\n sent_embedings=[]\n try_flag=False\n for sent in cleaned_sent:\n sent_embedings.append(getphrase(sent,google_model))\n ques_em=getphrase(question,google_model)\n max_sim=-1\n index_sim=-1\n try:\n for index,faq_em in enumerate(sent_embedings):\n if(param=='cosine'):\n sim=cosine_similarity(faq_em,ques_em)[0][0]\n if(param=='euclid'):\n sim=euclidean_distances(faq_em,ques_em)[0][0]\n if(param=='man'):\n sim=manhattan_distances(faq_em,ques_em)[0][0] \n if(sim>max_sim):\n max_sim=sim\n index_sim=index\n try_flag=True\n ans=df.iloc[index_sim,1]\n return ans,try_flag\n except Exception as e:\n return 0,try_flag\n\n\n#Response for bagofwords approach\ndef resp1(ques,param):\n cleaned_sent=get_clean_sent(df,stopwords=True)\n sentences=cleaned_sent\n sent_words=[[wrd for wrd in document.split()]for document in sentences]\n dictionary=corpora.Dictionary(sent_words)\n bow_corpus=[dictionary.doc2bow(text) for text in sent_words]\n ques=clean_sent(ques,stopwords=True)\n #print(ques)\n ques_em=dictionary.doc2bow(ques.split())\n #print(ques_em)\n ans,try_flag=glove(ques,cleaned_sent,param)\n #print('Returned ans :: ',ans)\n #print('try_flag :: ',try_flag)\n if try_flag:\n return ans\n return retrieve(ques_em,bow_corpus,df,sentences,ques,param)\n\n\ndef retrieve(ques_em,sent_em,df,sent,user_inp,param):\n max_sim=-1\n index_sim=-1\n try:\n for index,faq_em in enumerate(sent_em):\n if(param=='cosine'):\n sim=cosine_similarity(faq_em,ques_em)[0][0]\n if(param=='euclid'):\n sim=euclidean_distances(faq_em,ques_em)[0][0]\n if(param=='man'):\n sim=manhattan_distances(faq_em,ques_em)[0][0] \n if(sim>max_sim):\n max_sim=sim\n index_sim=index\n ans3=df.iloc[index_sim,1]\n return ans3\n except Exception as e:\n pass\n ans1=resp(user_inp)\n ans2=search_google(user_inp)\n cos1,cos2=0,0\n inp=text_to_vector(user_inp)\n cos1=get_cosine(inp,text_to_vector(ans1))\n cos2=get_cosine(inp,text_to_vector(ans2))\n if(cos1>=cos2):\n return ans1\n return ans2\n\ndef get_bot_resp(user_inp,param):\n flag=False\n while(1):\n ans=greet(user_inp.lower())\n print(\"got ans for query\",ans,user_inp)\n if(user_inp=='what are branches in sot'):\n ans=\"Following are the branches : Electrical,Chemical,Mechanical,Civil,Computer,ICT\"\n flag=True\n return ans,flag\n if(user_inp=='is there hostel facility in pdeu'):\n ans=\"Yes there is hostel facility in pdeu\"\n flag=True\n 
return ans,flag\n if(user_inp=='average fee per year'):\n ans='Average Fees 2,43,250 ruppes per year'\n flag=True\n return ans,flag\n if(ans!=None):\n flag=True\n return ans,flag\n return resp1(user_inp.lower(),param),flag\n\n\n\n", "sub_path": "chatbot/chatbot.py", "file_name": "chatbot.py", "file_ext": "py", "file_size_in_byte": 9965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 48, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 53, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 60, "usage_type": "call"}, {"api_name": "nltk.stem", "line_number": 60, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 66, "usage_type": "attribute"}, {"api_name": "nltk.word_tokenize", "line_number": 68, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 81, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 128, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 132, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 156, "usage_type": "call"}, {"api_name": "gensim.parsing.preprocessing.remove_stopwords", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors.load", "line_number": 191, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 191, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.euclidean_distances", "line_number": 204, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.manhattan_distances", "line_number": 206, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 222, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 222, "usage_type": "name"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 242, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.euclidean_distances", "line_number": 244, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.manhattan_distances", "line_number": 246, "usage_type": "call"}]}
+{"seq_id": "285238353", "text": "import time\nimport requests\nfrom pkg_resources import parse_version\n\nimport click\nimport gevent\nfrom gevent.event import AsyncResult\nfrom gevent.queue import Queue\nimport structlog\n\nfrom raiden.exceptions import RaidenShuttingDown\nfrom raiden.utils import get_system_spec\n\nCHECK_VERSION_INTERVAL = 3 * 60 * 60\nLATEST = 'https://api.github.com/repos/raiden-network/raiden/releases/latest'\nRELEASE_PAGE = 'https://github.com/raiden-network/raiden/releases'\n\nREMOVE_CALLBACK = object()\nlog = structlog.get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef check_version():\n \"\"\"Check every 3h for a new release\"\"\"\n app_version = parse_version(get_system_spec()['raiden'])\n while True:\n try:\n content = requests.get(LATEST).json()\n # getting the latest release version\n latest_release = parse_version(content['tag_name'])\n # comparing it to the user's application\n if app_version < latest_release:\n msg = \"You're running version {}. The latest version is {}\".format(\n app_version,\n latest_release,\n )\n click.secho(msg, fg='red')\n click.secho(\"It's time to update! Releases: {}\".format(RELEASE_PAGE), fg='red')\n except requests.exceptions.HTTPError as herr:\n click.secho('Error while checking for version', fg='red')\n print(herr)\n except ValueError as verr:\n click.secho('Error while checking the version', fg='red')\n print(verr)\n finally:\n # repeat the process once every 3h\n gevent.sleep(CHECK_VERSION_INTERVAL)\n\n\nclass AlarmTask(gevent.Greenlet):\n \"\"\" Task to notify when a block is mined. \"\"\"\n\n def __init__(self, chain):\n super().__init__()\n self.callbacks = list()\n self.stop_event = AsyncResult()\n self.chain = chain\n self.last_block_number = None\n self.response_queue = Queue()\n\n # TODO: Start with a larger wait_time and decrease it as the\n # probability of a new block increases.\n self.wait_time = 0.5\n self.last_loop = time.time()\n\n def register_callback(self, callback):\n \"\"\" Register a new callback.\n\n Note:\n The callback will be executed in the AlarmTask context and for\n this reason it should not block, otherwise we can miss block\n changes.\n \"\"\"\n if not callable(callback):\n raise ValueError('callback is not a callable')\n\n self.callbacks.append(callback)\n\n def remove_callback(self, callback):\n \"\"\"Remove callback from the list of callbacks if it exists\"\"\"\n if callback in self.callbacks:\n self.callbacks.remove(callback)\n\n def _run(self): # pylint: disable=method-hidden\n self.last_block_number = self.chain.block_number()\n log.debug('starting block number', block_number=self.last_block_number)\n\n sleep_time = 0\n while self.stop_event.wait(sleep_time) is not True:\n try:\n self.poll_for_new_block()\n except RaidenShuttingDown:\n break\n\n # we want this task to iterate in the tick of `wait_time`, so take\n # into account how long we spent executing one tick.\n self.last_loop = time.time()\n work_time = self.last_loop - self.last_loop\n if work_time > self.wait_time:\n log.warning(\n 'alarm loop is taking longer than the wait time',\n work_time=work_time,\n wait_time=self.wait_time,\n )\n sleep_time = 0.001\n else:\n sleep_time = self.wait_time - work_time\n\n # stopping\n self.callbacks = list()\n\n def poll_for_new_block(self):\n chain_id = self.chain.network_id\n current_block = self.chain.block_number()\n\n if current_block > self.last_block_number + 1:\n difference = current_block - self.last_block_number - 1\n log.error('alarm missed %s blocks' % (difference), 
current_block=current_block)\n\n if current_block != self.last_block_number:\n log.debug(\n 'new block',\n number=current_block,\n timestamp=self.last_loop,\n )\n\n self.last_block_number = current_block\n remove = list()\n for callback in self.callbacks:\n result = callback(current_block, chain_id)\n if result is REMOVE_CALLBACK:\n remove.append(callback)\n\n for callback in remove:\n self.callbacks.remove(callback)\n\n def stop_async(self):\n self.stop_event.set(True)\n", "sub_path": "raiden/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 4787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "structlog.get_logger", "line_number": 19, "usage_type": "call"}, {"api_name": "pkg_resources.parse_version", "line_number": 24, "usage_type": "call"}, {"api_name": "raiden.utils.get_system_spec", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "pkg_resources.parse_version", "line_number": 29, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 36, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 38, "usage_type": "attribute"}, {"api_name": "click.secho", "line_number": 39, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 42, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "gevent.Greenlet", "line_number": 49, "usage_type": "attribute"}, {"api_name": "gevent.event.AsyncResult", "line_number": 55, "usage_type": "call"}, {"api_name": "gevent.queue.Queue", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "raiden.exceptions.RaidenShuttingDown", "line_number": 91, "usage_type": "name"}, {"api_name": "time.time", "line_number": 96, "usage_type": "call"}]}
+{"seq_id": "128229949", "text": "# Copyright 2021 Open Robotics (2021)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom ament_index_python.packages import get_package_share_directory\n\nfrom launch import LaunchDescription\nfrom launch_ros.actions import Node\nfrom launch.actions import ExecuteProcess, IncludeLaunchDescription, RegisterEventHandler\nfrom launch.event_handlers import OnProcessExit\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\n\nimport xacro\nimport yaml\n\n\ndef load_file(package_name, file_path):\n package_path = get_package_share_directory(package_name)\n absolute_file_path = os.path.join(package_path, file_path)\n try:\n with open(absolute_file_path, 'r') as file:\n return file.read()\n except EnvironmentError:\n # parent of IOError, OSError *and* WindowsError where available\n return None\n\n\ndef load_yaml(package_name, file_path):\n package_path = get_package_share_directory(package_name)\n absolute_file_path = os.path.join(package_path, file_path)\n try:\n with open(absolute_file_path, 'r') as file:\n return yaml.safe_load(file)\n except EnvironmentError:\n # parent of IOError, OSError *and* WindowsError where available\n return None\n\n\ndef generate_launch_description():\n # moveit_cpp.yaml is passed by filename for now since it's node specific\n ur10_gazebo = os.path.join(\n get_package_share_directory('ur10_gazebo'),\n 'worlds',\n 'ur10.world')\n\n gazebo = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([os.path.join(\n get_package_share_directory('gazebo_ros'), 'launch'), '/gazebo.launch.py']),\n launch_arguments={'world': ur10_gazebo}.items(),\n )\n\n ur10_description_path = os.path.join(\n get_package_share_directory('ur10_description'))\n\n xacro_file = os.path.join(ur10_description_path,\n 'urdf',\n 'ur10_robot.urdf.xacro')\n\n doc = xacro.parse(open(xacro_file))\n xacro.process_doc(doc)\n robot_description_config = doc.toxml()\n robot_description = {'robot_description': robot_description_config}\n\n node_robot_state_publisher = Node(\n package='robot_state_publisher',\n executable='robot_state_publisher',\n output='screen',\n parameters=[robot_description]\n )\n\n spawn_entity = Node(package='gazebo_ros', executable='spawn_entity.py',\n arguments=['-topic', 'robot_description',\n '-entity', 'ur10'],\n output='screen')\n\n load_joint_state_controller = ExecuteProcess(\n cmd=['ros2', 'control', 'load_start_controller', 'joint_state_controller'],\n output='screen'\n )\n\n load_joint_trajectory_controller = ExecuteProcess(\n cmd=['ros2', 'control', 'load_start_controller', 'joint_trajectory_controller'],\n output='screen'\n )\n\n # Static TF\n #static_tf = Node(package='tf2_ros',\n # executable='static_transform_publisher',\n # name='static_transform_publisher',\n # output='log',\n # arguments=['0.0', '0.0', '0.65', '0.0', '0.0', '0.0', 'world', 'ur_base'])\n\n return LaunchDescription([\n RegisterEventHandler(\n event_handler=OnProcessExit(\n target_action=spawn_entity,\n 
on_exit=[load_joint_state_controller],\n )\n ),\n RegisterEventHandler(\n event_handler=OnProcessExit(\n target_action=load_joint_state_controller,\n on_exit=[load_joint_trajectory_controller],\n )\n ),\n gazebo,\n node_robot_state_publisher,\n #static_tf,\n spawn_entity\n ])\n", "sub_path": "ur10_gazebo/launch/ur10_gazebo.launch.py", "file_name": "ur10_gazebo.launch.py", "file_ext": "py", "file_size_in_byte": 4286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 54, "usage_type": "call"}, {"api_name": "launch.actions.IncludeLaunchDescription", "line_number": 58, "usage_type": "call"}, {"api_name": "launch.launch_description_sources.PythonLaunchDescriptionSource", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ament_index_python.packages.get_package_share_directory", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "xacro.parse", "line_number": 71, "usage_type": "call"}, {"api_name": "xacro.process_doc", "line_number": 72, "usage_type": "call"}, {"api_name": "launch_ros.actions.Node", "line_number": 76, "usage_type": "call"}, {"api_name": "launch_ros.actions.Node", "line_number": 83, "usage_type": "call"}, {"api_name": "launch.actions.ExecuteProcess", "line_number": 88, "usage_type": "call"}, {"api_name": "launch.actions.ExecuteProcess", "line_number": 93, "usage_type": "call"}, {"api_name": "launch.LaunchDescription", "line_number": 105, "usage_type": "call"}, {"api_name": "launch.actions.RegisterEventHandler", "line_number": 106, "usage_type": "call"}, {"api_name": "launch.event_handlers.OnProcessExit", "line_number": 107, "usage_type": "call"}, {"api_name": "launch.actions.RegisterEventHandler", "line_number": 112, "usage_type": "call"}, {"api_name": "launch.event_handlers.OnProcessExit", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "367111044", "text": "from numpy import array\nfrom keras.models import Sequential\nfrom keras.layers import Dense,LSTM\n\n\n#1. 데이터\nx=array([[1,2,3],[2,3,4],[3,4,5],[4,5,6]]) #4행 3열\ny=array([4,5,6,7]) #(4,)--->스칼라가 4개짜리 벡터1개 (4,)!=(4,1) 절대 (4,1)이라고 하면 안된다. input_dim=1(일차원)\n# y2=array([[4,5,6,7]]) #(1,4)\n# y3=array([[4],[5],[6],[7]]) #(4,1)\n\nprint(\"x.shape:\",x.shape) #(4,3)\nprint(\"y.shape:\",y.shape) #(4,) --->(4,1)이라고 하면 에러 난다. \n#shape해서 확인해보기!!\n#자르는 숫자 명시 ex)4x3x1--> (4,3)을 1개씩 연속된 데이터 계산하겠다(1개씩 작업) (행, 열, 몇개로 자를건지)\n\n# x=x.reshape(4,3,1) #전체 데이터는 변경되지 않는다. \n# reshape할 때 검사는 곱하기! (4*3)=(4*3*1)\nx=x.reshape(x.shape[0],x.shape[1],1) # x.shape[0]=4, x.shape[1]=3 \n#위에 식이나 아래식이나 결과는 동일하나 정석은 두번째꺼가 맞는 것!\n\nprint(\"x:\",x.shape)\nprint(\"x:\",x)\n\n#2. 모델구성\n# LSTM은 DENSE모델에 비해 많은 연산을 하게 된다. \nmodel=Sequential()\nmodel.add(LSTM(10,activation='relu',input_shape=(3,1))) #시계열 input_shape=(3,1) ***행 무시***, LSTM에서 중요한 것: 컬럼의 개수와 몇개씩 잘라서 계산할 것이냐, 행은 중요하지 않다\n#여기서부터는 Dense모델\nmodel.add(Dense(5))\nmodel.add(Dense(15))\nmodel.add(Dense(15))\nmodel.add(Dense(15))\nmodel.add(Dense(50))\nmodel.add(Dense(15))\nmodel.add(Dense(15))\nmodel.add(Dense(1)) #하나 예측 y=[4,5,6,7]\n\nmodel.summary() #param[1]=480\n# 이유: 1*10-->input/ 1*10--->bias/10*10--->역전파------->합쳐서 100+10+10=120////120*4=480\n\n\"\"\"\n#과제1\n#param 이 왜 480나오는 지 찾아보기\n#input_shape는 (3,1)밖�� 안들어갔는데 왜 480이 나올까\n\"\"\"\n#3. 실행\nmodel.compile(optimizer='adam',loss='mse') #metrics하나 안하나 상관없다.\nmodel.fit(x,y,epochs=300,batch_size=1)\n\nx_input=array([5,6,7]) #(3,) 와꾸가 안맞음--->(1,3,1)로 변환 (행, 열, 몇개로 쪼갤건지)\nx_input=x_input.reshape(1,3,1)\nprint(x_input)\n\nyhat=model.predict(x_input)\nprint(yhat)\n##정확하게 예측이 안된다. LSTM너무 적어서 , 수정할 수 있는 부분 수정\n\n\n\n#예제\n# x=array([[1,2,3],[1,2,3]]) #(2,3)\n# print(x.shape)\n# y=array([[[1,2],[4,2]],[[4,5],[5,6]]]) #(덩어리 개수, 개수, 제일 작은 단위) #작은거부터 치고 올라가기\n# print(y.shape)\n# z=array([[[1],[2],[3]],[[4],[5],[6]]])\n# print(z.shape)\n\n# w=array([[[1,2,3,4]]])\n# print(w.shape)\n# k=array([[[[1],[2]]],[[[3],[4]]]])\n# print(k.shape)\n###스칼라 벡터 행렬 텐서\n\n", "sub_path": "keras/keras29_lstm.py", "file_name": "keras29_lstm.py", "file_ext": "py", "file_size_in_byte": 2606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "323144281", "text": "######################################################################\n######### DNA ALIQUOTING FOR 2 CRYO VIAL TUBE PLATES #################\n######################################################################\nfrom opentrons import protocol_api\nimport pandas as pd\nimport numpy as np\nimport os\n\n# metadata\nmetadata = {\n 'protocolName': 'DNA ALIQUOTING',\n 'author': 'Name ',\n 'description': 'DNA ALIQUOTING for Opentrons',\n 'apiLevel': '2.10'\n}\nwater_run = False\npositions = ['A1','A2','A3','A4','A5','A6','A7','A8','A9','A10','A11','A12']\n# positions = ['A1','A2','A3']\nDNA_volume = 200.0\n\n# protocol run function. the part after the colon lets your editor know\n# where to look for autocomplete suggestions\ndef run(protocol: protocol_api.ProtocolContext):\n #### DEFINE LABWARE\n #### TIP RACKS\n tip_rack1_300ul = protocol.load_labware('opentrons_96_tiprack_300ul', '1')\n\n #### PLATES\n dna_source_plate = protocol.load_labware('nest_96_wellplate_2ml_deep_on_chemagic_stand', '2')\n cryo_vial_tubes_1 = protocol.load_labware('nunc_cryo_vial_tubes', '5')\n cryo_vial_tubes_2 = protocol.load_labware('nunc_cryo_vial_tubes', '8')\n\n #### PIPETTES\n right_pipette = protocol.load_instrument(\n 'p300_multi_gen2', mount='right', tip_racks=[tip_rack1_300ul])\n\n def dna_aliquot():\n i = 0\n for j in range(len(positions)):\n comment = 'COLUMN NO: ' + str(i+1)\n protocol.comment(comment)\n right_pipette.pick_up_tip()\n # 1ST ALIQUOT\n right_pipette.aspirate(volume=DNA_volume,\n location=dna_source_plate[positions[i]].bottom(2),\n rate=0.3)\n protocol.delay(seconds=2)\n right_pipette.touch_tip()\n right_pipette.air_gap(2)\n\n right_pipette.dispense(volume=DNA_volume,\n location=cryo_vial_tubes_1[positions[i]].bottom(4),\n rate=0.3)\n right_pipette.blow_out(location=cryo_vial_tubes_1[positions[i]].bottom(9))\n protocol.delay(seconds=1)\n\n # 2ND ALIQUOT\n right_pipette.aspirate(volume=DNA_volume,\n location=dna_source_plate[positions[i]].bottom(1.25),\n rate=0.3)\n protocol.delay(seconds=2)\n right_pipette.touch_tip()\n right_pipette.air_gap(2)\n\n right_pipette.dispense(volume=DNA_volume,\n location=cryo_vial_tubes_2[positions[i]].bottom(2),\n rate=0.3)\n right_pipette.blow_out(location=cryo_vial_tubes_2[positions[i]].bottom(9))\n protocol.delay(seconds=1)\n\n if water_run:\n right_pipette.return_tip(home_after=False)\n else:\n right_pipette.drop_tip(home_after=False)\n i = i + 1\n\n\n def flashing_lights():\n for i in range(5):\n protocol.set_rail_lights(True)\n protocol.delay(seconds=0.5)\n protocol.set_rail_lights(False)\n protocol.delay(seconds=0.5)\n\n ### COMMANDS ####\n protocol.set_rail_lights(True)\n dna_aliquot()\n flashing_lights()\n protocol.set_rail_lights(True)\n", "sub_path": "OT2CEP20210331B09_DNA_EXT/Protocols/DNA_ALIQUOTING/DNA_ALIQUOTING_V1.0.py", "file_name": "DNA_ALIQUOTING_V1.0.py", "file_ext": "py", "file_size_in_byte": 3332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "opentrons.protocol_api.ProtocolContext", "line_number": 23, "usage_type": "attribute"}, {"api_name": "opentrons.protocol_api", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "207903835", "text": "import prepare_sigmorphon_data\nimport common\nimport codecs\n\nBEGIN_WORD = ''\nEND_WORD = ''\nNULL = '%'\n\ndef main():\n #train_path = '/Users/roeeaharoni/research_data/sigmorphon2016-master/data/german-task1-train'\n #test_path = '/Users/roeeaharoni/research_data/sigmorphon2016-master/data/german-task1-dev'\n\n train_path = '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_train.txt.sigmorphon_format'\n test_path = '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_test.txt.sigmorphon_format'\n dev_path = '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_dev.txt.sigmorphon_format'\n\n convert_sigmorphon_to_morphtrans(train_path, '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_train.txt.morphtrans_format.txt')\n convert_sigmorphon_to_morphtrans(test_path, '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_test.txt.morphtrans_format.txt', False)\n convert_sigmorphon_to_morphtrans(dev_path, '/Users/roeeaharoni/research_data/morphology/wiktionary-morphology-1.1/base_forms_de_noun_dev.txt.morphtrans_format.txt', False)\n\ndef convert_sigmorphon_to_morphtrans(sig_file, morphtrans_file, create_alphabet = True):\n\n (words, lemmas, feat_dicts) = prepare_sigmorphon_data.load_data(sig_file)\n alphabet, feats = prepare_sigmorphon_data.get_alphabet(words, lemmas, feat_dicts)\n alphabet.append(BEGIN_WORD)\n alphabet.append(END_WORD)\n\n if create_alphabet:\n with codecs.open(morphtrans_file + '.word_alphabet', \"w\", encoding='utf8') as alphabet_file:\n alphabet_file.write(' '.join([c for c in list(alphabet) if len(c) < 2]) + ' ' + END_WORD + ' '\n + BEGIN_WORD)\n\n morph2feats = common.cluster_data_by_morph_type(feat_dicts, feats)\n with codecs.open(morphtrans_file + '.morph_alphabet', \"w\", encoding='utf8') as alphabet_file:\n alphabet_file.write(' '.join([key for key in morph2feats.keys()]))\n\n with codecs.open(morphtrans_file, \"w\", encoding='utf8') as output_file:\n for lemma, word, dict in zip(lemmas, words, feat_dicts):\n # a b g a s k l a p p e | a b g a s k l a p p e |case=nominative:number=singular\n output_file.write(BEGIN_WORD + ' ' + ' '.join(list(lemma)) + ' ' + END_WORD + '|' + BEGIN_WORD + ' ' +\n ' '.join(list(word)) + ' ' + END_WORD + '|' + get_morph_string(dict, feats) + '\\n')\n return\n\ndef get_morph_string(feat_dict, feats):\n s = ''\n for f in sorted(feats):\n if f in feat_dict:\n s += f + '=' + feat_dict[f] + ':'\n else:\n s += f + '=' + NULL + ':'\n return s[:-1]\n\nif __name__ == '__main__':\n main()", "sub_path": "src/sigmorphon2morphtrans.py", "file_name": "sigmorphon2morphtrans.py", "file_ext": "py", "file_size_in_byte": 2851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "prepare_sigmorphon_data.load_data", "line_number": 23, "usage_type": "call"}, {"api_name": "prepare_sigmorphon_data.get_alphabet", "line_number": 24, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 29, "usage_type": "call"}, {"api_name": "common.cluster_data_by_morph_type", "line_number": 33, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 34, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "60976605", "text": "# from keras.datasets import mnist\n#\n# (train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n#\n# print(train_images.ndim)\n# print(train_images.shape)\n# print(train_images.dtype)\n#\n# digit = train_images[4]\n#\n# import matplotlib.pyplot as plt\n#\n# my_slice = train_images[10:100]\n# print(my_slice.shape)\n#\n# # my_slice2 = train_images[:, 14:, 14:]\n# # print(my_slice2.shape)\n#\n# def naive_relu(x):\n# assert len(x.shape) == 2\n# x = x.copy() #입력텐서를 바꾸지 않도록 복사\n# for i in range(x.shape[0]):\n# for j in range(x.shape[1]):\n# x[i, j] = max(x[i, j], 0)\n# return x\n#\n# def naive_add(x, y):\n# assert len(x.shape) == 2\n# assert x.shape == y.shape\n#\n# x = x.copy()\n# for i in range(x.shape[0]):\n# for j in range(x.shape[1]):\n# x[i, j] += y[i, j]\n# return x\n\nimport numpy as np\nimport os\nimport time\n\nos.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'\nimport keras\nimport keras.applications as kapp\nfrom keras.datasets import cifar10\n\n(x_train, y_train_cats), (x_test, y_test_cats) = cifar10.load_data()\nbatch_size = 8\nx_train = x_train[:batch_size]\nx_train = np.repeat(np.repeat(x_train, 7, axis=1), 7, axis=2)\nprint('1')\nmodel = kapp.VGG19()\nmodel.compile(optimizer='sgd', loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nprint(\"Running initial batch (compiling tile program)\")\ny = model.predict(x=x_train, batch_size=batch_size)\n\n# Now start the clock and run 10 batches\nprint(\"Timing inference...\")\nstart = time.time()\nfor i in range(10):\n y = model.predict(x=x_train, batch_size=batch_size)\n print(i)\nprint(\"Ran in {} seconds\".format(time.time() - start))", "sub_path": "DeepLearning/mnist.py", "file_name": "mnist.py", "file_ext": "py", "file_size_in_byte": 1706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "keras.datasets.cifar10.load_data", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.datasets.cifar10", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.repeat", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.applications.VGG19", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 51, "usage_type": "name"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "646745512", "text": "'''\n2020-9-26 caoxinzi\n'''\n\n'''------------------图像可视化(查看不同step的图像)------------------'''\n\nfrom torchvision import datasets\nimport torchvision.transforms as transform\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport torch\n\nwriter = SummaryWriter(log_dir='./log')\nnum_workers = 0\n\n#每批加载16张图片\nbatch_size =16\n\n#percentage of training set to use as validation\nvalid_size = 0.2\n\n#将数据转换为torch.FloatTensor,并标准化\ntransform = transform.Compose([\n transform.ToTensor(),\n transform.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))\n])\n\n#选择训练数据集和测试数据集\ntrain_data = datasets.CIFAR10('data', train=True, download=True, transform=transform)\ntest_data = datasets.CIFAR10('data', train=False, download=True, transform=transform)\n\n#将训练数据集中划分验证集\nnum_train = len(train_data)\nindices = list(range(num_train))\nnp.random.shuffle(indices)\nsplit = int(np.floor(valid_size * num_train))\ntrain_idx, valid_idx = indices[split:],indices[:split]\n\n#define samples for obtaining training and validation batches\ntrain_sampler = SubsetRandomSampler(train_idx)\nvalid_sampler = SubsetRandomSampler(valid_idx)\n\n#prepare data loaders(combine dataset and sampler)\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers)\n\nvalid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers)\n\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)\n\n#对训练数据进行可视化\nimgs, labels = iter(train_data).__next__()\n\nfor i in range(len(imgs)):\n # 乘以偏差\n img = imgs[i].mul(torch.Tensor(np.array([0.5, 0.5, 0.5]).reshape(-1,1,1)))\n \n #加上均值\n img = imgs[i].add(torch.Tensor(np.array([0.5, 0.5, 0.5]).reshape(-1,1,1)))\n \n #加入图像数据\n writer.add_image('input',img,i+1)\n\n", "sub_path": "8_1 Pytorch Visualization.py", "file_name": "8_1 Pytorch Visualization.py", "file_ext": "py", "file_size_in_byte": 2043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.random.shuffle", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 37, "usage_type": "call"}, {"api_name": 
"torch.utils.data.sampler.SubsetRandomSampler", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.SubsetRandomSampler", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "63529545", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n AipBase\n\"\"\"\nimport hmac\nimport json\nimport hashlib\nimport http\nimport datetime\nimport base64\nimport time\nfrom urllib import urlencode\nfrom urllib import quote\nfrom urlparse import urlparse\n\nclass AipBase:\n \"\"\"\n AipBase\n \"\"\"\n\n __accessTokenUrl = 'https://aip.baidubce.com/oauth/2.0/token'\n\n __scopes = set([\n 'vis-ocr_ocr',\n 'vis-ocr_bankcard',\n 'vis-faceattribute_faceattribute',\n 'nlp_wordseg',\n 'nlp_simnet',\n 'nlp_wordemb',\n 'nlp_comtag',\n 'nlp_wordpos',\n 'nlp_dnnlm_cn',\n 'vis-antiporn_antiporn_v2',\n 'audio_voice_assistant_get',\n 'audio_tts_post',\n 'vis-faceverify_faceverify',\n ])\n\n def __init__(self, appId, apiKey, secretKey):\n \"\"\"\n AipBase(appId, apiKey, secretKey)\n \"\"\"\n\n self._appId = appId.strip()\n self._apiKey = apiKey.strip()\n self._secretKey = secretKey.strip()\n self._authObj = {}\n self._isCloudUser = None\n\n def _request(self, url, data):\n \"\"\"\n self._request('', {})\n \"\"\"\n\n authObj = self._auth()\n headers = self._getAuthHeaders('POST', url)\n params = self._getParams(authObj)\n\n response = http.post(url, data=data, params=params, headers=headers)\n obj = self._proccessResult(response)\n\n if not self._isCloudUser and obj.get('error_code', '') == 110:\n authObj = self._auth(True)\n params = self._getParams(authObj)\n response = http.post(url, data=data, params=params, headers=headers)\n obj = self._proccessResult(response)\n\n return obj\n\n def _proccessResult(self, content):\n \"\"\"\n formate result\n \"\"\"\n\n return json.loads(content) or {}\n\n def _auth(self, refresh=False):\n \"\"\"\n api access auth\n \"\"\"\n \n if len(self._apiKey) == 32 or self._isCloudUser == True:\n self._isCloudUser = True\n return\n\n #未过期\n if not refresh:\n tm = self._authObj.get('time', 0) + int(self._authObj.get('expire_in', 0)) - 30\n if tm > int(time.time()):\n return self._authObj\n\n obj = json.loads(http.get(self.__accessTokenUrl, params={\n 'grant_type': 'client_credentials',\n 'client_id': self._apiKey,\n 'client_secret': self._secretKey,\n }))\n\n self._isCloudUser = not self._isPermission(obj)\n\n return obj\n\n def _isPermission(self, authObj):\n \"\"\"\n check whether permission\n \"\"\"\n\n scopes = authObj.get('scope', False) \n if scopes == False:\n return False\n\n intersection = self.__scopes.intersection(set(scopes.split(' ')))\n\n return not not intersection\n\n def _getParams(self, authObj):\n \"\"\"\n api request http url params\n \"\"\"\n\n params = {}\n\n if self._isCloudUser == False:\n params['access_token'] = authObj['access_token']\n\n return params\n\n def _getAuthHeaders(self, method, url):\n \"\"\"\n api request http headers\n \"\"\"\n if self._isCloudUser == False:\n return {}\n\n # UTC timestamp\n timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n urlResult = urlparse(url)\n host = urlResult.hostname\n path = urlResult.path\n version, expire, signatureHeaders = '1', '1800', 'host'\n\n # 1 Generate SigningKey\n val = \"bce-auth-v%s/%s/%s/%s\" % (version, self._apiKey, timestamp, expire)\n signingKey = hmac.new(self._secretKey, val, hashlib.sha256).hexdigest().encode('utf-8')\n\n # 2 Generate CanonicalRequest\n # 2.1 Genrate CanonicalURI\n canonicalUri = quote(path)\n # 2.2 Generate CanonicalURI: not used here\n # 2.3 Generate CanonicalHeaders: only include host here\n canonicalHeaders = 'host:%s' % quote(host).strip()\n # 2.4 Generate CanonicalRequest\n canonicalRequest = '%s\\n%s\\n\\n%s' % 
(method.upper(), canonicalUri, canonicalHeaders)\n\n # 3 Generate Final Signature \n signature = hmac.new(signingKey, canonicalRequest, hashlib.sha256).hexdigest()\n authorization = 'bce-auth-v%s/%s/%s/%s/%s/%s' % (version, self._apiKey, timestamp, expire, signatureHeaders, signature)\n\n return {\n 'Host': host,\n 'x-bce-date': timestamp,\n 'accept': '*/*',\n 'authorization': authorization,\n }\n", "sub_path": "venv/Lib/site-packages/aip/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 4571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "http.post", "line_number": 60, "usage_type": "call"}, {"api_name": "http.post", "line_number": 66, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 93, "usage_type": "call"}, {"api_name": "http.get", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "attribute"}, {"api_name": "urlparse.urlparse", "line_number": 137, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 144, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 144, "usage_type": "attribute"}, {"api_name": "urllib.quote", "line_number": 148, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 151, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 156, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 156, "usage_type": "attribute"}]}
+{"seq_id": "434992406", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom sphinx.util.compat import Directive\n\nimport io\nimport os\nimport sys\nimport re\n\nclass question(nodes.General, nodes.Element): pass\n\ndef visit_question_node(self, node):\n self.body.append(\" \")\n self.body.append(\"
\"+node[\"ques\"]+\"
\")\n if node[\"a\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"a\"]+\"
\")\n if node[\"b\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"b\"]+\"
\")\n if node[\"c\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"c\"]+\"
\")\n if node[\"d\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"d\"]+\"
\")\n if node[\"e\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"e\"]+\"
\")\n if node[\"f\"]:\n self.body.append(\"
\\n\"+\" \" + node[\"f\"]+\"
\")\n self.body.append(\"
\")\n self.body.append(\"
\")\n self.body.append(\"
\")\n\n self.body.append(\"\")\n\n\ndef depart_question_node(self, node):\n pass\n\nclass Question(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 7\n final_argument_whitespace = False\n option_spec = {\n \"ra\": directives.unchanged,\n \"a\": directives.unchanged,\n \"b\": directives.unchanged,\n \"c\": directives.unchanged,\n \"d\": directives.unchanged,\n \"e\": directives.unchanged,\n \"f\": directives.unchanged,\n }\n\n def run(self):\n id = \"\".join(self.arguments)\n id = id.replace(\"'\", \"\")\n id = id.replace(\"?\", \"\")\n ques = \" \".join(self.arguments)\n\n ra = None\n a = None\n b = None\n c = None\n d = None\n e = None\n f = None\n\n if \"ra\" in self.options:\n ra = self.options[\"ra\"]\n if \"a\" in self.options:\n a = self.options[\"a\"]\n if \"b\" in self.options:\n b = self.options[\"b\"]\n if \"c\" in self.options:\n c = self.options[\"c\"]\n if \"d\" in self.options:\n d = self.options[\"d\"]\n if \"e\" in self.options:\n e = self.options[\"e\"]\n if \"f\" in self.options:\n f = self.options[\"f\"]\n\n return [question(id=id, ques=ques, ra=ra, a=a, b=b, c=c, d=d, e=e, f=f)]\n\n\ndef setup(app):\n app.add_node(question, html=(visit_question_node, depart_question_node))\n app.add_directive(\"question\", Question)", "sub_path": "back_annotation_loop/elec1601_sphinx_v4/source/exts/question.py", "file_name": "question.py", "file_ext": "py", "file_size_in_byte": 4496, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "docutils.nodes.General", "line_number": 14, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 14, "usage_type": "name"}, {"api_name": "docutils.nodes.Element", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sphinx.util.compat.Directive", "line_number": 62, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 68, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 68, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 69, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 69, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 70, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 70, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 71, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 71, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 72, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 72, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 73, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 73, "usage_type": "name"}, {"api_name": "docutils.parsers.rst.directives.unchanged", "line_number": 74, "usage_type": "attribute"}, {"api_name": "docutils.parsers.rst.directives", "line_number": 74, "usage_type": "name"}]}
+{"seq_id": "42662080", "text": "# from pyzbar import pyzbar\r\n# import argparse\r\n# import cv2\r\n# # construct the argument parser and parse the arguments\r\n# ap = argparse.ArgumentParser()\r\n# ap.add_argument(\"-i\", \"--image\", required=True,\r\n# help=\"path to input image\")\r\n# args = vars(ap.parse_args())\r\n\r\n# # load the input image\r\n# image = cv2.imread(args[\"C:\\\\Users\\\\jfern\\\\OneDrive\\\\Desktop\\\\12345.png\"])\r\n\r\n# # find the barcodes in the image and decode each of the barcodes\r\n# barcodes = pyzbar.decode(image)\r\n\r\n# for barcode in barcodes:\r\n# \t# extract the bounding box location of the barcode and draw the\r\n# \t# bounding box surrounding the barcode on the image\r\n# \t(x, y, w, h) = barcode.rect\r\n# \tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n\r\n# \t# the barcode data is a bytes object so if we want to draw it on\r\n# \t# our output image we need to convert it to a string first\r\n# \tbarcodeData = barcode.data.decode(\"utf-8\")\r\n# \tbarcodeType = barcode.type\r\n\r\n# \t# draw the barcode data and barcode type on the image\r\n# \ttext = \"{} ({})\".format(barcodeData, barcodeType)\r\n# \tcv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,\r\n# \t\t0.5, (0, 0, 255), 2)\r\n\r\n# \t# print the barcode type and data to the terminal\r\n# \tprint(\"[INFO] Found {} barcode: {}\".format(barcodeType, barcodeData))\r\n\r\n# # show the output image\r\n# cv2.imshow(\"Image\", image)\r\n# cv2.waitKey(0)\r\n\r\n\r\nimport hashlib\r\nfrom imutils.video import VideoStream\r\nfrom pyzbar import pyzbar\r\nimport argparse\r\nimport datetime\r\nimport imutils\r\nimport time\r\nimport cv2\r\n\r\na = 0\r\n\r\n# construct the argument parser and parse the arguments\r\n# ap = argparse.ArgumentParser()\r\n# ap.add_argument(\"-o\", \"--output\", type=str, default=\"\",\r\n# help=\"path to output CSV file containing barcodes\")\r\n# args = vars(ap.parse_args())\r\n\r\nprint(\"[INFO] starting video stream...\")\r\n# vs = VideoStream(src=0).start()\r\nvs = VideoStream(0).start()\r\ntime.sleep(1.0)\r\n\r\n# open the output CSV file for writing and initialize the set of\r\n# barcodes found thus far\r\n# csv = open(args[\"output\"], \"w\")\r\nfound = set()\r\nwhile True:\r\n # grab the frame from the threaded video stream and resize it to\r\n # have a maximum width of 400 pixels\r\n frame = vs.read()\r\n frame = imutils.resize(frame, width=400)\r\n\r\n # find the barcodes in the frame and decode each of the barcodes\r\n barcodes = pyzbar.decode(frame)\r\n # loop over the detected barcodes\r\n for barcode in barcodes:\r\n # extract the bounding box location of the barcode and draw\r\n # the bounding box surrounding the barcode on the image\r\n (x, y, w, h) = barcode.rect\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n\r\n # the barcode data is a bytes object so if we want to draw it\r\n # on our output image we need to convert it to a string first\r\n barcodeData = barcode.data.decode(\"utf-8\")\r\n barcodeType = barcode.type\r\n\r\n # draw the barcode data and barcode type on the image\r\n text = \"{}\".format(barcodeData)\r\n cv2.putText(frame, text, (x, y - 10),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n\r\n a = 1\r\n\r\n # if the barcode text is currently not in our CSV file, write\r\n # the timestamp + barcode to disk and update the set\r\n # if barcodeData not in found:\r\n # csv.write(\"{},{}\\n\".format(datetime.datetime.now(),\r\n # barcodeData))\r\n # csv.flush()\r\n # 
found.add(barcodeData)\r\n\r\n # show the output frame\r\n cv2.imshow(\"Barcode Scanner\", frame)\r\n key = cv2.waitKey(1) & 0xFF\r\n\r\n # if the `q` key was pressed, break from the loop\r\n if key == ord(\"q\") or a == 1:\r\n break\r\n\r\n# close the output CSV file do a bit of cleanup\r\nprint(\"[INFO] cleaning up...\")\r\nprint(text)\r\n\r\n# initializing string\r\nstr = text\r\n\r\n\r\n# encoding GeeksforGeeks using encode()\r\n# then sending to SHA256()\r\nresult = hashlib.sha256(str.encode())\r\n\r\n# printing the equivalent hexadecimal value.\r\nprint(\"The hexadecimal equivalent of SHA256 is : \")\r\nprint(result.hexdigest())\r\n\r\nprint(\"\\r\")\r\n# csv.close()\r\ncv2.destroyAllWindows()\r\nvs.stop()\r\n\r\n\r\n", "sub_path": "DMCE final codebase/blockchain_client/scanner.py", "file_name": "scanner.py", "file_ext": "py", "file_size_in_byte": 4163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "imutils.video.VideoStream", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "imutils.resize", "line_number": 70, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar.decode", "line_number": 73, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar", "line_number": 73, "usage_type": "name"}, {"api_name": "cv2.rectangle", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 103, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 127, "usage_type": "call"}]}
+{"seq_id": "444358816", "text": "\"\"\"\nDjango settings for agencia24 project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nPROJECT_ROOT = BASE_DIR\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'i1)hxlju1t0i0-7kq&!&usy*2^xvx2fn4d!oa(vbdfbf1f3hs8'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\t# 3rd party apps I added:\n 'axes',\n 'bootstrap3_datetime',\n 'corsheaders',\n 'django_extensions',\n 'django_modalview',\n #'debug_toolbar',\n 'django_unused_media',\n 'django_user_agents',\n 'fixture_magic',\n 'longerusernameandemail',\n 'mathfilters',\n 'oauth2_provider', # add 'WSGIPassAuthorization On' to httpd.conf file\n\t'pagination',\n 'passwords',\n\t'registration',\n 'widget_tweaks',\n\t# My apps\n 'bet',\n 'simple_webservice',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'pagination.middleware.PaginationMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'axes.middleware.FailedLoginMiddleware',\n 'oauth2_provider.middleware.OAuth2TokenMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n)\n\nROOT_URLCONF = 'agencia24.urls'\n\nWSGI_APPLICATION = 'agencia24.wsgi.application'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_ROOT, \"templates\"),\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'bet.context_processors.debug',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n ],\n #'debug': False,\n },\n },\n]\nfrom django.template.loaders import eggs\n\nif not DEBUG:\n TEMPLATES[0]['OPTIONS']['loaders'] = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n )),\n)\n\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\n# Database\n# 
https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'es-ar'\n\nTIME_ZONE = 'America/Argentina/Cordoba'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\nMEDIA_URL = \"/site_media/media/\"\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, \"site_media\", \"_media\")\n\nSTATIC_URL = '/site_media/static/'\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, \"static\")\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT, \"site_media\", \"static\"),\n)\n\nLOCALE_PATHS = (\n os.path.join(PROJECT_ROOT, 'locale').replace('\\\\', '/'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': 'A24 %(levelname)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console':{\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'agencia24_default': {\n 'handlers': ['console', 'mail_admins'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n\n#===============================================================================\n# REGISTRATION!\n#===============================================================================\n\nACCOUNT_ACTIVATION_DAYS = 5\nSEND_ACTIVATION_EMAIL = False\n\nEMAIL_HOST_USER = \"no_reply_agencia24\"\nEMAIL_HOST_PASSWORD = \"noresponder\"\nEMAIL_HOST = 'smtp.webfaction.com'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = '(Agencia24) '\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\n\nREQUIRE_UNIQUE_EMAIL = False\n\n#===============================================================================\n\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/accounts/login'\n\n\n#===============================================================================\n# MERCADOPAGO\n#===============================================================================\n\nMP_ACCESS_TOKEN = \"TEST-2692549476916264-012507-3c26394260b30dfc3c78a004094cf36d__LA_LB__-162591608\"\n\nMP_CLIENT_ID = \"2692549476916264\"\nMP_SECRET_KEY = \"oB4gYLQz5lNhFRWOuXt0WNW4umSW2mvj\"\n\n#===============================================================================\n# PDFKIT\n#===============================================================================\n\nWKHTMLTOPDF_PATH = ''\n\n#===============================================================================\n# PUSH\n#===============================================================================\n\n# IOS\nIOS_PUSH_HEADERS = {\n \"Authorization\": \"key=AIzaSyAuMBsR2J-i1Ne9gHH_1DL8jbHEBYJ5IgU\",\n \"content-Type\": \"application/json\"\n}\n\nANDROID_PUSH_HEADERS = {\n \"Authorization\": 
\"key=AIzaSyD-dcMsjsQsWbJ1tPwjsnMdwym79mE8xDU\",\n #\"Authorization\": \"key=AIzaSyA-D9yqibGabnUb_5bqQZptdQFxBQndGuc\",\n \"content-Type\": \"application/json\"\n}\n\n#===============================================================================\n# DJANGO OAUTH TOOLKIT\n#===============================================================================\n\nOAUTH2_PROVIDER = {\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 600, # Seconds\n 'REFRESH_TOKEN_EXPIRE_SECONDS': 6*3600,\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nAUTHENTICATION_BACKENDS = (\n 'oauth2_provider.backends.OAuth2Backend',\n # Uncomment following if you want to access the admin\n 'django.contrib.auth.backends.ModelBackend'\n)\n\n#===============================================================================\n# DJANGO-PASSWORDS!\n#===============================================================================\n\nPASSWORD_MIN_LENGTH = 4\n\nPASSWORD_COMPLEXITY = { # You can omit any or all of these for no limit for that particular set\n \"UPPER\": 0, # Uppercase\n \"LOWER\": 0, # Lowercase\n \"LETTERS\": 0, # Either uppercase or lowercase letters\n \"DIGITS\": 0, # Digits\n \"SPECIAL\": 0, # Not alphanumeric, space or punctuation character\n \"WORDS\": 0 # Words (alphanumeric sequences separated by a whitespace or punctuation character)\n}\n\n#===============================================================================\n#===============================================================================\n\nQUINI6_MAX_NUMBER = 45\nLOTO_MAX_NUMBER = 41\nLOTO_MAX_EXTRA = 9\nLOTO5_MAX_NUMBER = 36\nBRINCO_MAX_NUMBER = 39\n\n#===============================================================================\n# DJANGO USER AGENT\n#===============================================================================\n\n# TODO!\n# Cache backend is optional, but recommended to speed up user agent parsing\n#CACHES = {\n# 'default': {\n# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n# 'LOCATION': '127.0.0.1:11211',\n# }\n#}\n\n# Name of cache backend to cache user agents. If it not specified default\n# cache alias will be used. Set to `None` to disable caching.\n#USER_AGENTS_CACHE = 'default'\n\n#===============================================================================\n# DJANGO AXES\n#===============================================================================\n\nfrom django.utils.timezone import timedelta\nAXES_COOLOFF_TIME = timedelta(minutes=20) # Hours\nAXES_LOCKOUT_TEMPLATE = 'registration/login.html'\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = True\n\n\"\"\"\nAXES_LOGIN_FAILURE_LIMIT: The number of login attempts allowed before a record is created for the failed logins. Default: 3\nAXES_LOCK_OUT_AT_FAILURE: After the number of allowed login attempts are exceeded, should we lock out this IP (and optional user agent)? Default: True\nAXES_USE_USER_AGENT: If True, lock out / log based on an IP address AND a user agent. This means requests from different user agents but from the same IP are treated differently. Default: False\nAXES_COOLOFF_TIME: If set, defines a period of inactivity after which old failed login attempts will be forgotten. Can be set to a python timedelta object or an integer. If an integer, will be interpreted as a number of hours. Default: None\nAXES_LOGGER: If set, specifies a logging mechanism for axes to use. Default: 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE: If set, specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as context variables. 
Default: None\nAXES_LOCKOUT_URL: If set, specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template will be used. Default: None\nAXES_VERBOSE: If True, you'll see slightly more logging for Axes. Default: True\nAXES_USERNAME_FORM_FIELD: the name of the form field that contains your users usernames. Default: username\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP: If True prevents to login from IP under particular user if attempts limit exceed, otherwise lock out based on IP. Default: False\n\"\"\"\n\n#===============================================================================\n\nADMINS = [('Developer', 'developer@liricus.com.ar')]\n#DEBUG = False\n#ALLOWED_HOSTS = ['*']\n\nSUPPORTED_IMPORT_EXT = ('.csv',)\nEXTRACT_SEPARATOR = '*'\n\nfrom local_settings_sf import *\n", "sub_path": "agencia24/settings_sf.py", "file_name": "settings_sf.py", "file_ext": "py", "file_size_in_byte": 11803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 326, "usage_type": "call"}]}
+{"seq_id": "452394228", "text": "# -*- coding: utf-8 -*-\nfrom .file_utils import fetch_or_upload_file_links, fetch_or_upload_html_links\nimport time as _time\nfrom installed_clients.baseclient import ServerError as _DFUError\nfrom uuid import uuid4\n\n\"\"\" Utilities for creating reports using DataFileUtil \"\"\"\n\n\ndef create_report(params, dfu):\n \"\"\"\n Create a simple report\n :param params: see the KIDL spec for the create() parameters\n :param dfu: instance of DataFileUtil\n :return: report data\n \"\"\"\n report_name = \"report_\" + str(uuid4())\n workspace_id = _get_workspace_id(dfu, params)\n # Empty defaults for merging\n report_data = {\n 'objects_created': [],\n 'text_message': '',\n }\n report_data.update(params['report'])\n save_object_params = {\n 'id': workspace_id,\n 'objects': [{\n 'type': 'KBaseReport.Report',\n 'data': report_data,\n 'name': report_name,\n 'meta': {},\n 'hidden': 1\n }]\n }\n obj = _save_object(dfu, save_object_params)\n ref = _get_object_ref(obj)\n return {'ref': ref, 'name': report_name}\n\n\ndef create_extended(params, dfu, templater):\n \"\"\"\n Create an extended report\n This will upload files to shock if you provide scratch paths instead of shock_ids\n :param params: see the KIDL spec for create_extended_report() parameters\n :param dfu: instance of DataFileUtil\n :return: uploaded report data - {'ref': r, 'name': n}\n \"\"\"\n file_links = params.get('file_links', [])\n html_links = params.get('html_links', [])\n files = fetch_or_upload_file_links(dfu, file_links, templater) # see ./file_utils.py\n html_files = fetch_or_upload_html_links(dfu, html_links, templater)\n report_data = {\n 'text_message': params.get('message'),\n 'file_links': files,\n 'html_links': html_files,\n 'warnings': params.get('warnings', []),\n 'direct_html': params.get('direct_html'),\n 'direct_html_link_index': params.get('direct_html_link_index'),\n 'objects_created': params.get('objects_created', []),\n 'html_window_height': params.get('html_window_height'),\n 'summary_window_height': params.get('summary_window_height')\n }\n report_name = params.get('report_object_name', 'report_' + str(uuid4()))\n workspace_id = _get_workspace_id(dfu, params)\n save_object_params = {\n 'id': workspace_id,\n 'objects': [{\n 'type': 'KBaseReport.Report',\n 'data': report_data,\n 'name': report_name,\n 'meta': {},\n 'hidden': 1\n }]\n }\n obj = _save_object(dfu, save_object_params)\n ref = _get_object_ref(obj)\n return {'ref': ref, 'name': report_name}\n\n\ndef _get_workspace_id(dfu, params):\n \"\"\"\n Get the workspace ID from the params, which may either have 'workspace_id'\n or 'workspace_name'. 
Workspace ID is immutable so should take precedence.\n \"\"\"\n if 'workspace_id' in params:\n return params.get('workspace_id')\n\n return dfu.ws_name_to_id(params['workspace_name'])\n\n\ndef _get_object_ref(obj):\n \"\"\" Get the reference string from an uploaded dfu object \"\"\"\n return str(obj[6]) + '/' + str(obj[0]) + '/' + str(obj[4])\n\n\ndef _save_object(dfu, params):\n \"\"\" Save an object with DFU using error handling \"\"\"\n try:\n return dfu.save_objects(params)[0]\n except _DFUError as err:\n print(f'{_time.time()} DataFileUtil exception: {err}')\n raise err\n except Exception as err:\n print(f'{_time.time()} Unexpected DataFileUtil exception: {err}')\n raise err\n", "sub_path": "lib/KBaseReport/utils/report_utils.py", "file_name": "report_utils.py", "file_ext": "py", "file_size_in_byte": 3588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "call"}, {"api_name": "file_utils.fetch_or_upload_file_links", "line_number": 50, "usage_type": "call"}, {"api_name": "file_utils.fetch_or_upload_html_links", "line_number": 51, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 63, "usage_type": "call"}, {"api_name": "installed_clients.baseclient.ServerError", "line_number": 100, "usage_type": "name"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}]}
+{"seq_id": "308672346", "text": "import time\nimport math\nimport numpy as np\nimport scipy\nfrom scipy import optimize\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport csv\nimport pandas as pd\nfrom definitions import *\nimport callFile\nfrom scipy import stats as stats\n\ndef main(N,smallestIncrement):\n\n\n # attainable_mass_simulate and weighted_stddev are taken directly from grind size application source code, adapted only\n # to work with this code\n #Method to calculate attainable mass\n def attainable_mass_simulate(volumes):\n \n #This could be done better analytically\n depth_limit = 0.1 #mm\n \n radii = (3.0/4.0*volumes/np.pi)**(1/3)\n unreachable_volumes = np.full(volumes.size, 0.0)\n \n iboulders = np.where(radii > depth_limit)\n unreachable_volumes[iboulders[0]] = 4.0/3.0*np.pi*(radii[iboulders[0]] - depth_limit)**3\n reachable_volumes = volumes - unreachable_volumes\n \n return reachable_volumes\n\n def weighted_stddev(data, weights, frequency=True, unbiased=True):\n \n #Calculate the bias correction estimator\n if unbiased is True:\n if frequency is True:\n bias_estimator = (np.nansum(weights) - 1.0)/np.nansum(weights)\n else:\n bias_estimator = 1.0 - (np.nansum(weights**2))/(np.nansum(weights)**2)\n else:\n bias_estimator = 1.0\n \n #Normalize weights\n weights /= np.nansum(weights)\n \n #Calculate weighted average\n wmean = np.nansum(data*weights)\n \n #Deviations from average\n deviations = data - wmean\n \n #Un-biased weighted variance\n wvar = np.nansum(deviations**2*weights)/bias_estimator\n \n #Un-biased weighted standard deviation\n wstddev = np.sqrt(wvar)\n \n return wstddev\n\n grindSetting=np.arange(1,N+1)\n ##### Extract Data from .csv files #####\n # Loop through each grind setting\n for a in range(1,N+1): # need to start at 1 because filenames are 1-N\n # import each file, list the values, then change the lists into arrays\n statsArray = np.array(pd.read_csv(\"data/setting%d_stats.csv\" % a))\n# statsArray = np.array(statsList)\n\n \n # get the value from each stats column in the order: '', avg_diam, std_diam, avg_surface, std_surface, efficiency, quality\n avgDiam.append(float(statsArray[:,1]))\n #stdDiam.append(float(statsArray[:,2]))\n avgSurf.append(float(statsArray[:,3])); stdSurf.append(float(statsArray[:,4]))\n efficiency.append(float(statsArray[:,5])); quality.append(float(statsArray[:,6]))\n\n # get the value from each .csv column in the order: ID, surface, roundness, short_axis, long_axis, volume, pixel_scale\n # data for grind setting 'a' can be retrieved as parameter[a]\n settingArray = np.array(pd.read_csv(\"data/setting%d.csv\" % a))\n# settingArray = np.array(settingList)\n \n\n pixel_scale = settingArray[:,6]\n surfaces = settingArray[:,1]/pixel_scale**2\n volumes = settingArray[:,5]/pixel_scale**3\n attainable_masses = attainable_mass_simulate(volumes)\n data_weights = surfaces\n weights = np.maximum(np.ceil(attainable_masses/(coffee_cell_size/1e3)**3),1)\n surfacesAverage = np.sum(surfaces*weights)/np.sum(weights)\n stdDiamUpper.append(np.max(surfaces)-surfacesAverage)\n stdDiamLower.append(surfacesAverage-np.min(surfaces))\n surfacesStats = stats.describe(surfaces)\n skewness.append(surfacesStats[4])\n kurtosis.append(surfacesStats[5])\n\n # Calculate the average adjustment made between each whole-number grind setting and print results\n for b in range(0,N-1):\n settingAdjustment.append(avgSurf[b+1] - avgSurf[b])\n avgAdjustment = np.sum(settingAdjustment)/len(settingAdjustment)\n\n print()\n 
print(\"-----------------------------------------------\")\n print(\"---------Grinder Adjustment Parameters --------\")\n print()\n print(\"Total Adjustment Range (Setting {}-1): {:.2}mm^2\".format(N,avgSurf[-1]-avgSurf[0]))\n print(\"Average Adjustment Between Each Setting: {:.2}mm^2\".format(avgAdjustment))\n print()\n\n ##### Information To Plot ##### \n print()\n whichInformation = input(\"Which information would you like to view? (d)iameter,(s)urface: \")\n dataTypes=[avgDiam, stdDiam, avgSurf, stdSurf]\n def dataType(type):\n global data, dataError, pltTitle, units\n if type == \"d\":\n data = dataTypes[0]\n dataError = dataTypes[1]\n pltTitle = \"Average Diameter\"\n units = \"mm\"\n elif type == \"s\":\n data = dataTypes[2]\n dataError = dataTypes[3]\n pltTitle = \"Average Surface Area\"\n units = \"mm^2\"\n return data, dataError\n dataType(whichInformation)\n\n\n ##### Fitting #####\n print()\n # Ask user which regression form to use\n #fitType = input(\"Which Fit Type Would You Like? (l)inear,(q)uad: \")\n fitType = 'l'\n fitTypes = [funcLinear, funcQuad]\n def fittingFunction(type):\n global fitTypePlot\n\n # If the type is linear, perform all linear-regression related procedures\n if type == \"l\":\n popt, pcov = curve_fit(funcLinear, grindSetting, data, maxfev=2000) # the regression\n perr = np.sqrt(np.diag(pcov)) # error in the regression\n ss_res = np.sum((data - funcLinear(grindSetting,*popt))**2)\n ss_tot = np.sum ((data-np.mean(data))**2)\n r_squared = 1 - (ss_res/ss_tot)\n plt.plot(grindSetting, funcLinear(grindSetting, *popt), label=\"Linear Fit\", color='green') # plots the regression against grind setting\n fitTypePlot = fitTypes[0]\n# plt.text(grindSetting[0],data[-1],r'$Equation\\ of\\ Linear\\ Fit: y={:.2}x +({:.2})$'.format(popt[0],popt[1])) # generate equation of fit on figure\n# plt.text(grindSetting[0],data[9],r'$R^2={:.2}$'.format(r_squared)) # generate equation of fit r^2 value on figure\n print()\n print(\"------------- Fit Parameters ------------\") \n print(\"\\n Slope = {:.2} +/- {:.2}\".format(popt[0],perr[1]))\n print(\"\\n Intercept = {:.2} +/- {:.2}mm\".format(popt[1],perr[0]))\n print(\"\\n R^2 = {:.2}\".format(r_squared))\n print()\n\n elif type == \"q\":\n popt, pcov = curve_fit(funcQuad, grindSetting, data, maxfev=2000)\n perr = np.sqrt(np.diag(pcov))\n plt.plot(grindSetting, funcQuad(grindSetting, *popt), label=\"Quadratic Fit\", color='green')\n plt.text(grindSetting[0],data[10],r'$Equation\\ of\\ Quadratic\\ Fit: y={:.2}x^2+{:.2}x+{:.2}$'.format(popt[0],popt[1],popt[2]))\n fitTypePlot = fitTypes[1]\n\n print()\n print(\"------------- Fit Parameters ------------\") \n print(\"\\n a: {:.2} +/- {:.2}\".format(popt[0],perr[0]))\n print(\"\\n b: {:.2} +/- {:.2}\".format(popt[1],perr[1]))\n print(\"\\n c: {:.2} +/- {:.2}\".format(popt[2],perr[2]))\n print()\n return popt, pcov, fitTypePlot\n fittingFunction(fitType)\n\n\n ##### Plotting #####\n# input(\"Press Enter To Continue To Plots....\")\n plt.title(\"{} vs. 
Grind Setting\".format(pltTitle))\n plt.xlabel(\"Grind Setting\")\n plt.xlim([0,N+1])\n plt.xticks(ticks=grindSetting)\n plt.ylabel(\"{} [{}]\".format(pltTitle,units))\n\n plt.errorbar(grindSetting,data, fmt='o', color='black', ecolor='red', capsize=2, label='{}'.format(pltTitle), xerr=smallestIncrement/2, yerr=dataError)\n# plt.errorbar(grindSetting,data, fmt='o', color='black', ecolor='red', capsize=2, label='{}'.format(pltTitle), xerr=smallestIncrement/2, yerr=[stdDiamLower,stdDiamUpper])\n\n for i in range(0,len(avgDiam)):\n # Annotate the values for the errorbars on the graph, each for upper and lower.\n plt.annotate(data[i],(grindSetting[i]+.1*max(data),data[i]), color='black')\n plt.annotate(dataError[i],(grindSetting[i],dataError[i]+data[i]), color='red', label=\"error\")\n# plt.annotate(data[i],(grindSetting[i],\"{:.2}\".format(skewness[i])), color='red', label=\"error\")\n# plt.annotate(\"{:.2}\".format(stdDiamLower[i]),(grindSetting[i],stdDiamLower[i]-data[i]),color='purple',label='lower error')\n# plt.annotate(\"{:.2}\".format(stdDiamUpper[i]),(grindSetting[i],stdDiamUpper[i]+data[i]),color='orange',label='upper error')\n plt.legend()\n plt.savefig(\"{} Plot.png\".format(pltTitle), dpi=199)\n plt.show()\n\n", "sub_path": "grindSize/capressoInfinity/grinderAnalysis.py", "file_name": "grinderAnalysis.py", "file_ext": "py", "file_size_in_byte": 8777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.nansum", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.stats.describe", "line_number": 92, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 143, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}]}
+{"seq_id": "222528979", "text": "from ast import literal_eval #\nfrom nltk.stem import PorterStemmer\nimport math\nimport os\nimport sys\nimport re\n\n# Tokenization class which has a tokenization function\nclass Tokenization:\n def __init__(self):\n self.tokens_list = []\n # This function takes a opened file as a input argument, and it tokenizes the file with several regular expressions.\n # Regex patterns are compiled for matching several patterns that occur in the file.\n def tokenization(self, file):\n\n date_rx = re.compile(r'((?:0[1-9]|[12][0-9]|3[01])[./-](?:(?:0?[1-9]|1[0-2])|(?:\\w+))[./-](?:(?:\\d{2})?\\d{2}))') # Regex for Date\n single_quotes_rx = re.compile(r'(?:^|\\s)\\'([^\\']*?)\\'(?:$|\\s)') # Regex for single Quotes\n hyphenated_rx = re.compile(r\"([\\w]+(?:\\n-[\\w]+)+)\") # Regex for hyphenated words\n name1_rx = re.compile(r\"([A-Z][\\w]+[.'\\s?](?:[A-Z]['.]\\s?)[A-Z][\\w]+(?:.[A-Z][\\w]+)?)\") # Regex for finding names\n cont_capital_rx = re.compile(r\"([A-Z][a-z]+[ ](?:[A-Z][a-z]+[ ]?)+)\") # Regex for matching continoous captial words\n acronyms_rx = re.compile(r\"((?:[A-Z]\\.)+(?:[A-Z]+))\") # Regex for acronyms\n contraction_rx = re.compile(r\"([\\w]+'[\\w]+)\") # regex for contraction words\n\n puncList = [\".\", \";\", \":\", \"!\", \"?\", \"/\", \"\\\\\", \",\", \"#\", \"@\", \"$\", \"&\", \")\", \"(\", \"\\\"\",'\\n','-','_']\n # punctuation list is pre defined for removing punctuation present in the list from the tokenized words\n\n # Empty lists are declared for storing the matched words in their coresponding list\n date_list = []\n single_quotes = []\n hyphenated_list = []\n name1_list = []\n cont_capital_list = []\n acronyms_list = []\n contraction_list = []\n\n # Getting IP address from the file\n if re.search(IP_rx,file):\n IP_address_list = re.findall(IP_rx,file) # all matched groups are stored in the list\n # print('IP',IP_address_list)\n for i in range(len(IP_address_list)):\n file = file.replace(IP_address_list[i], '') # after a group is matched, its removed from the file\n IP_address_list[i] = str(IP_address_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting Email patterns from the file\n if re.search(email_rx,file):\n email_list = re.findall(email_rx,file) # all matched groups are stored in the list\n # print('Email', email_list)\n for i in range(len(email_list)):\n file = file.replace(email_list[i], '') # after a group is matched, its removed from the file\n email_list[i] = str(email_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting Date patterns from the file\n if re.search(date_rx,file):\n date_list = re.findall(date_rx,file) # all matched groups are stored in the list\n # print('date', date_list)\n for i in range(len(date_list)):\n file = file.replace(date_list[i], '') # after a group is matched, its removed from the file\n date_list[i] = str(date_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting URL patterns from the file\n if re.search(URL_rx, file):\n URL_list = re.findall(URL_rx, file) # all matched groups are stored in the list\n for i in range(len(URL_list)):\n file = file.replace(URL_list[i], '') # after a group is matched, its removed from the file\n URL_list[i] = str(URL_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n no_removal = URL_list + 
date_list + email_list + IP_address_list\n # URL, Date, Email and IP adress list are combined as a single list\n\n # Getting Single quotes patterns from the file\n if re.search(single_quotes_rx,file):\n single_quotes = re.findall(single_quotes_rx,file) # all matched groups are stored in the list\n # print('Quotes', single_quotes)\n for i in range(len(single_quotes)):\n file = file.replace(single_quotes[i], '') # after a group is matched, its removed from the file\n single_quotes[i] = str(single_quotes[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting Hyphenated patterns from the file\n if re.search(hyphenated_rx,file):\n hyphenated_list = re.findall(hyphenated_rx,file) # all matched groups are stored in the list\n # print('hyphenated_rx',hyphenated_list)\n for i in range(len(hyphenated_list)):\n file = file.replace(hyphenated_list[i], '') # after a group is matched, its removed from the file\n hyphenated_list[i] = str(hyphenated_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting name patterns from the file\n if re.search(name1_rx,file):\n name1_list = re.findall(name1_rx,file) # all matched groups are stored in the list\n # print('name',name1_list)\n for i in range(len(name1_list)):\n file = file.replace(name1_list[i], '') # after a group is matched, its removed from the file\n name1_list[i] = str(name1_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting contraction word patterns from the file\n if re.search(contraction_rx,file):\n contraction_list = re.findall(contraction_rx,file) # all matched groups are stored in the list\n # print('contraction',contraction_list)\n for i in range(len(contraction_list)):\n file = file.replace(contraction_list[i], '') # after a group is matched, its removed from the file\n if '\\'s' in contraction_list[i]:\n contraction_list[i] = contraction_list[i].replace('\\'s','')\n contraction_list[i] = str(contraction_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting continous capital words patterns from the file\n if re.search(cont_capital_rx,file):\n cont_capital_list = re.findall(cont_capital_rx,file) # all matched groups are stored in the list\n # print('cont_capital',cont_capital_list)\n for i in range(len(cont_capital_list)):\n file = file.replace(cont_capital_list[i], '') # after a group is matched, its removed from the file\n cont_capital_list[i] = str(cont_capital_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n # Getting acronyms patterns from the file\n if re.search(acronyms_rx,file):\n acronyms_list = re.findall(acronyms_rx,file) # all matched groups are stored in the list\n # print('acronyms',acronyms_list)\n for i in range(len(acronyms_list)):\n file = file.replace(acronyms_list[i], '') # after a group is matched, its removed from the file\n acronyms_list[i] = str(acronyms_list[i]).strip('`()*&^%$#@!+_-~{}[]:;?/.,\\' ') # strip function is used for obtained string present in the list\n\n self.tokens_list = hyphenated_list + name1_list + cont_capital_list +acronyms_list + contraction_list + single_quotes\n # The tokens_list contains all tokens from hyphenated list, name list, cont_capital_list + acronyms List and from single quote list\n\n # The tokens_list is iterated for removing \\n whihc 
also has '-',\n # this case is occuring for hyphenated words that are matched from the file\n for i in range(len(self.tokens_list)):\n if '\\n' in self.tokens_list[i]:\n self.tokens_list[i] = self.tokens_list[i].replace('\\n','')\n if '-' in self.tokens_list[i]:\n self.tokens_list[i] = self.tokens_list[i].replace('-','')\n\n # All the tokens in tokens_list are iterated for punctuation removal\n for punct in range(len(puncList)):\n for word in range(len(self.tokens_list)):\n if puncList[punct] in self.tokens_list[word]:\n self.tokens_list[word] = self.tokens_list[word].replace(puncList[punct], '')\n\n # After all the regex patterns are obtained, rest of the file split stored in words_list\n words_list = file.split()\n # print('words',words_list)\n\n # Punctuation removal is done for the words_list\n for punct in range(len(puncList)):\n for word in range(len(words_list)):\n if puncList[punct] in words_list[word]:\n words_list[word] = words_list[word].replace(puncList[punct],'')\n\n # print('words_no_punc', words_list)\n words_filtered = []\n for each in range(len(words_list)):\n if words_list[each] != \"''\" and words_list[each] != '':\n words_filtered.append(words_list[each])\n\n\n # print('filtered',words_filtered)\n self.tokens_list = self.tokens_list + words_filtered + no_removal # the tokens_list is updated by combining all the list containing tokens\n return self.tokens_list # the final list is returned for a single file\n\nclass Stopword_removal:\n\n # In this class a function is defined for removing the stopwords from the tokens,\n # The stp_process function takes two arguments, tokens_list and stopword file.\n\n def __init__(self):\n self.final = []\n\n def stp_process(self, file, list):\n\n stopwords_file = open(file, 'r') # the stopwords file is opened\n stopwords_list = stopwords_file.readlines()\n for i in range(len(stopwords_list)):\n stopwords_list[i] = stopwords_list[i].replace('\\n', '')\n\n # the token list is iterated and if the token is not present in the stopwords the token is appened to a new list\n for j in range(len(list)):\n list[j] = list[j].lower()\n if list[j] not in stopwords_list:\n if len(list[j]) >= 3:\n self.final.append(list[j])\n return self.final\n\nclass Stemming:\n\n # In this class is defined for stemming process\n # A function is defined as stemming_process which takes a list as an input argument\n # Porter Stemmer is used from nltk package\n\n def __init__(self):\n self.stemmed = []\n\n def stemming_process(self,token_list):\n stemmer = PorterStemmer()\n for i in range(len(token_list)):\n self.stemmed.append(stemmer.stem(token_list[i]))\n return self.stemmed", "sub_path": "Info_extract.py", "file_name": "Info_extract.py", "file_ext": "py", "file_size_in_byte": 10874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}, {"api_name": "re.search", "line_number": 37, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}, 
{"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "re.search", "line_number": 53, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 61, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "re.search", "line_number": 71, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 79, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 80, "usage_type": "call"}, {"api_name": "re.search", "line_number": 87, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 88, "usage_type": "call"}, {"api_name": "re.search", "line_number": 95, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 96, "usage_type": "call"}, {"api_name": "re.search", "line_number": 105, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 106, "usage_type": "call"}, {"api_name": "re.search", "line_number": 113, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 114, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 191, "usage_type": "call"}]}
+{"seq_id": "570563725", "text": "from lib import action\n\n\nclass ConsulCatalogServicesAction(action.ConsulBaseAction):\n def run(self,\n index=None,\n wait=None,\n consistency=None,\n dc=None,\n token=None):\n\n return (True, self.consul.catalog.services(\n index=index,\n wait=wait,\n consistency=consistency,\n dc=dc,\n token=token))\n", "sub_path": "actions/catalog_services.py", "file_name": "catalog_services.py", "file_ext": "py", "file_size_in_byte": 409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "lib.action.ConsulBaseAction", "line_number": 4, "usage_type": "attribute"}, {"api_name": "lib.action", "line_number": 4, "usage_type": "name"}]}
+{"seq_id": "159142535", "text": "# Copyright © 2020 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Retrieve the aliases for the entity.\"\"\"\nfrom http import HTTPStatus\n\nfrom flask import jsonify, request\nfrom flask_restx import Resource, cors\n\nfrom legal_api.models import Alias, Business\nfrom legal_api.utils.util import cors_preflight\n\nfrom .api_namespace import API\n\n\n@cors_preflight('GET,')\n@API.route('//aliases', methods=['GET', 'OPTIONS'])\n@API.route('//aliases/', methods=['GET', 'OPTIONS'])\nclass AliasResource(Resource):\n \"\"\"Business Aliases service.\"\"\"\n\n @staticmethod\n @cors.crossdomain(origin='*')\n def get(identifier, alias_id=None):\n \"\"\"Return a JSON of the aliases.\"\"\"\n business = Business.find_by_identifier(identifier)\n\n if not business:\n return jsonify({'message': f'{identifier} not found'}), HTTPStatus.NOT_FOUND\n\n # return the matching alias\n if alias_id:\n alias, msg, code = AliasResource._get_alias(business, alias_id)\n return jsonify(alias or msg), code\n\n aliases_list = []\n\n alias_type = request.args.get('type')\n if alias_type:\n aliases = Alias.find_by_type(business.id, alias_type.upper())\n else:\n aliases = business.aliases.all()\n\n for alias in aliases:\n alias_json = alias.json\n aliases_list.append(alias_json)\n\n return jsonify(aliases=aliases_list)\n\n @staticmethod\n def _get_alias(business, alias_id=None):\n # find by ID\n alias = None\n if alias_id:\n rv = Alias.find_by_id(alias_id=alias_id)\n if rv:\n alias = {'alias': rv.json}\n\n if not alias:\n return None, {'message': f'{business.identifier} alias not found'}, HTTPStatus.NOT_FOUND\n\n return alias, None, HTTPStatus.OK\n", "sub_path": "legal-api/src/legal_api/resources/v1/business/business_aliases.py", "file_name": "business_aliases.py", "file_ext": "py", "file_size_in_byte": 2405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask_restx.Resource", "line_number": 29, "usage_type": "name"}, {"api_name": "legal_api.models.Business.find_by_identifier", "line_number": 36, "usage_type": "call"}, {"api_name": "legal_api.models.Business", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 39, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "legal_api.models.Alias.find_by_type", "line_number": 50, "usage_type": "call"}, {"api_name": "legal_api.models.Alias", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 58, "usage_type": "call"}, 
{"api_name": "flask_restx.cors.crossdomain", "line_number": 33, "usage_type": "call"}, {"api_name": "flask_restx.cors", "line_number": 33, "usage_type": "name"}, {"api_name": "legal_api.models.Alias.find_by_id", "line_number": 65, "usage_type": "call"}, {"api_name": "legal_api.models.Alias", "line_number": 65, "usage_type": "name"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 70, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 70, "usage_type": "name"}, {"api_name": "http.HTTPStatus.OK", "line_number": 72, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 72, "usage_type": "name"}, {"api_name": "legal_api.utils.util.cors_preflight", "line_number": 26, "usage_type": "call"}, {"api_name": "api_namespace.API.route", "line_number": 27, "usage_type": "call"}, {"api_name": "api_namespace.API", "line_number": 27, "usage_type": "name"}, {"api_name": "api_namespace.API.route", "line_number": 28, "usage_type": "call"}, {"api_name": "api_namespace.API", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "176954048", "text": "import os\nimport glob\nimport cdms2\nimport cdutil\nimport numpy as np\nimport csv\nfrom varid_dict import varid_longname\nfrom utils import climo\n\ndef var_seasons(var, seasons):\n \"Calculate seasonal climatology of each variable\"\n var_season_data = np.empty([len(seasons)])*np.nan\n cdutil.setTimeBoundsMonthly(var)\n for k, season in enumerate(seasons):\n if season == 'ANN':\n months = cdutil.times.Seasons('DJFMAMJJASON')\n else:\n months = cdutil.times.Seasons(str(season))\n var_season_data[k] = months.climatology(var)\n # convert units\n if var.id == 'tas':\n var_season_data = var_season_data-273.15\n\n if var.id == 'pr':\n var_season_data = var_season_data*3600.*24.\n \n \n return var_season_data\n\n\ndef seasonal_mean_table(parameter):\n \"\"\"Calculate seasonal mean climatology\"\"\"\n variables = parameter.variables\n seasons = parameter.season\n test_path = parameter.test_data_path\n obs_path = parameter.obs_path\n cmip_path = parameter.cmip_path\n output_path = parameter.output_path\n sites = parameter.sites\n \n test_model = parameter.test_data_set \n ref_models = parameter.ref_models\n\n # Calculate for test model\n test_var_season=np.empty([len(variables),len(seasons)])*np.nan\n test_file = glob.glob(os.path.join(test_path,'*'+test_model+'*mo*'+ sites[0]+'.nc')) #read in monthly test data\n if len(test_file) == 0:\n raise RuntimeError('No monthly data for test model were found.')\n \n fin = cdms2.open(test_file[0])\n \n print('test_model',test_model)\n\n for j, variable in enumerate(variables): \n try:\n var = fin (variable)\n #test_var_season[j, :] = var_seasons(var, seasons)\n test_var_season[j, :] = climo(var, seasons)\n\n except:\n print(variable+\" not processed for \" + test_model)\n fin.close()\n\n # Calculate for observational data\n obs_var_season=np.empty([len(variables),len(seasons)])*np.nan\n print('ARM data')\n if sites[0] == 'sgp':\n obs_file = glob.glob(os.path.join(obs_path,'*ARMdiag*monthly_stat_'+ sites[0]+'.nc')) #read in monthly test data\n fin = cdms2.open(obs_file[0])\n for j, variable in enumerate(variables): \n try:\n var = fin (variable)\n #obs_var_season[j, :] = var_seasons(var, seasons)\n obs_var_season[j, :] = climo(var, seasons)\n \n except:\n print(variable+\" not processed for obs\")\n fin.close()\n else:\n obs_file = glob.glob(os.path.join(obs_path,'*ARMdiag*monthly_climo*'+ sites[0]+'.nc')) #read in monthly test data\n fin = cdms2.open(obs_file[0]) \n for j, variable in enumerate(variables): \n try:\n var = fin (variable) \n \n #tmp\n obs_var_season[j,1:] = np.nanmean(np.reshape(var, (4,3)),axis=1)\n if variable == 'tas':\n obs_var_season[j,1:] = obs_var_season[j,1:] -273.15\n if variable == 'pr':\n obs_var_season[j,1:] = obs_var_season[j,1:] * 24.0\n if variable == 'prw':\n obs_var_season[j,1:] = obs_var_season[j,1:] * 10.0\n obs_var_season[j,0] = np.nanmean(obs_var_season[j,1:])\n \n #var24 = np.concatenate((var,var),axis=0)\n \n except:\n print(variable+\" not processed for obs\")\n fin.close() \n \n \n \n # Calculate cmip model seasonal mean climatology\n cmip_var_season=np.empty([len(ref_models),len(variables),len(seasons)])*np.nan\n \n for i, ref_model in enumerate(ref_models):\n ref_file = glob.glob(os.path.join(cmip_path,'*'+ref_model+'*mo*'+ sites[0]+'.nc')) #read in monthly cmip data\n print('ref_model', ref_model)\n if not ref_file :\n print(ref_model+\" not found!\") \n else:\n fin = cdms2.open(ref_file[0])\n \n for j, variable in enumerate(variables): \n try:\n var = fin (variable)\n 
#cmip_var_season[i, j, :] = var_seasons(var, seasons)\n cmip_var_season[i, j, :] = climo(var, seasons)\n\n except:\n print(variable+\" not processed for \" + ref_model)\n fin.close() \n # Calculate multi-model mean\n mmm_var_season = np.nanmean(cmip_var_season,axis=0)\n \n\n # Save data as a table\n #header=['Variables','Model','Obs','Model-Obs','CMIP5','RMSE']\n header=['Variables','Model','Obs','Model-Obs','CMIP5']\n var_longname = [ varid_longname[x] for x in variables]\n table_data = np.empty([len(variables),len(seasons),4])\n\n for k, season in enumerate(seasons):\n for j, variable in enumerate(variables):\n table_data[j,k,:] = (round(test_var_season[j,k],3), round(obs_var_season[j,k],3),round(test_var_season[j,k]-obs_var_season[j,k],3),round(mmm_var_season[j,k],3))\n \n with open (output_path+'/metrics/seasonal_mean_table_'+season+'_'+sites[0]+'.csv','w') as f1:\n writer=csv.writer(f1, delimiter=',',lineterminator='\\n', quoting=csv.QUOTE_NONE)\n writer.writerow(header)\n #use tuple to generate csv \n writer.writerows([c]+row.tolist() for c, row in zip(var_longname,table_data[:,k,:]))\n\n \n \n \n \n \n", "sub_path": "arm_diags/src/seasonal_mean.py", "file_name": "seasonal_mean.py", "file_ext": "py", "file_size_in_byte": 5431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.empty", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cdutil.setTimeBoundsMonthly", "line_number": 13, "usage_type": "call"}, {"api_name": "cdutil.times.Seasons", "line_number": 16, "usage_type": "call"}, {"api_name": "cdutil.times", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cdutil.times.Seasons", "line_number": 18, "usage_type": "call"}, {"api_name": "cdutil.times", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 45, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.climo", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 65, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.climo", "line_number": 74, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 105, "usage_type": "attribute"}, {"api_name": "glob.glob", 
"line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cdms2.open", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.climo", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 125, "usage_type": "call"}, {"api_name": "varid_dict.varid_longname", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 132, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 139, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONE", "line_number": 139, "usage_type": "attribute"}]}
+{"seq_id": "530099107", "text": "import json\n\nfrom django.utils.cache import add_never_cache_headers\nfrom django.views.generic import View\nfrom django.db import DatabaseError\nfrom django.core.exceptions import ValidationError\n\nfrom settings import DEBUG\nfrom .ApiResponse import ApiResponse\nfrom ...UserManager import UserManager\nfrom ...models import (ProProject, HisHistory, UsrUser,\n TskTask, MemMember, SchSchedule,\n AssAssignation)\n\n\nclass Global(View):\n\n def dispatch(self, request, *args, **kwargs):\n\n try:\n self.User = UserManager.Get(request.COOKIES.get('token', None))\n self.Token = request.COOKIES.get('token', None)\n\n if request.method.lower() in ['post', 'put']:\n self.data = json.loads(request.body.decode())\n\n if ('id' in kwargs and\n kwargs['id'] == 'me' and\n self.User is not None):\n\n kwargs['id'] = self.User.id\n\n response = super(Global, self).dispatch(request,\n *args,\n **kwargs)\n\n except json.decoder.JSONDecodeError as e:\n response = ApiResponse.Generate400('JSON Error: ' + str(e))\n\n except UsrUser.DoesNotExist as e:\n response = ApiResponse.Generate404('User not found')\n\n except TskTask.DoesNotExist as e:\n response = ApiResponse.Generate404('Task not found')\n\n except ProProject.DoesNotExist as e:\n response = ApiResponse.Generate404('Project not found')\n\n except MemMember.DoesNotExist as e:\n response = ApiResponse.Generate404('Members not found')\n\n except SchSchedule.DoesNotExist as e:\n response = ApiResponse.Generate404('Schedule not found')\n\n except HisHistory.DoesNotExist as e:\n response = ApiResponse.Generate404('Schedule not found')\n\n except AssAssignation.DoesNotExist as e:\n response = ApiResponse.Generate404('Assignation not found')\n\n except KeyError as e:\n response = ApiResponse.Generate422('Missing: ' + str(e))\n\n except TypeError as e:\n response = ApiResponse.Generate422('Invalid Data')\n\n except ValidationError as e:\n response = ApiResponse.Generate422('Invalid Data')\n\n except DatabaseError as e:\n if DEBUG:\n response = ApiResponse.Generate422('Database Error: ' + str(e))\n else:\n response = ApiResponse.Generate422('Invalid Data')\n\n except BaseException as e:\n response = ApiResponse.Generate500(e)\n\n if 'HTTP_ORIGIN' in request.META:\n response['Access-Control-Allow-Origin'] = request.META['HTTP_ORIGIN']\n else:\n response['Access-Control-Allow-Origin'] = '*'\n\n response['Access-Control-Allow-Methods'] = 'GET,POST,PUT,DELETE'\n response['Access-Control-Allow-Headers'] = 'Content-Type'\n response['Access-Control-Allow-Credentials'] = 'true'\n\n add_never_cache_headers(response)\n\n return response\n", "sub_path": "Focus/api/v1/Global.py", "file_name": "Global.py", "file_ext": "py", "file_size_in_byte": 3140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.views.generic.View", "line_number": 16, "usage_type": "name"}, {"api_name": "UserManager.UserManager.Get", "line_number": 21, "usage_type": "call"}, {"api_name": "UserManager.UserManager", "line_number": 21, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ApiResponse.ApiResponse.Generate400", "line_number": 38, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 38, "usage_type": "name"}, {"api_name": "models.UsrUser.DoesNotExist", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.UsrUser", 
"line_number": 40, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 41, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 41, "usage_type": "name"}, {"api_name": "models.TskTask.DoesNotExist", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.TskTask", "line_number": 43, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 44, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 44, "usage_type": "name"}, {"api_name": "models.ProProject.DoesNotExist", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.ProProject", "line_number": 46, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 47, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 47, "usage_type": "name"}, {"api_name": "models.MemMember.DoesNotExist", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.MemMember", "line_number": 49, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 50, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 50, "usage_type": "name"}, {"api_name": "models.SchSchedule.DoesNotExist", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.SchSchedule", "line_number": 52, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 53, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 53, "usage_type": "name"}, {"api_name": "models.HisHistory.DoesNotExist", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.HisHistory", "line_number": 55, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 56, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 56, "usage_type": "name"}, {"api_name": "models.AssAssignation.DoesNotExist", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.AssAssignation", "line_number": 58, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate404", "line_number": 59, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 59, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 62, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 62, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 65, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 65, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 67, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 68, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.DatabaseError", "line_number": 70, "usage_type": "name"}, {"api_name": "settings.DEBUG", "line_number": 71, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 72, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 72, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate422", "line_number": 74, "usage_type": "call"}, {"api_name": "ApiResponse.ApiResponse", "line_number": 74, "usage_type": "name"}, {"api_name": "ApiResponse.ApiResponse.Generate500", "line_number": 77, "usage_type": "call"}, {"api_name": 
"ApiResponse.ApiResponse", "line_number": 77, "usage_type": "name"}, {"api_name": "django.utils.cache.add_never_cache_headers", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "461766479", "text": "#!/usr/bin/python3\nimport pyautogui\nfrom pytube import YouTube\nfrom colorama import Fore, Back, Style\nimport os\nimport time\nimport pyttsx3\nimport datetime\nspeak = pyttsx3.init()\ne = ['q','Q','EXIT','QUIT','quit','exit']\npassword = []\nuser = []\nwhile True:\n os.system(\"clear\")\n print(Fore.RED+\"\"\"\n ____ _______\n | ___| | _____|\n _| |_ | |_____\n |_ _| R | _____| E \n | | | |______ \n |_| |_______|\n \"\"\")\n time.sleep(0.7)\n o = input(Fore.GREEN+\"\"\"\n __________OFFICIAL-CODE_______\n | |\n | [1] CREAT FREE |\n | |\n | [2] LOGIN |\n |______________________________|\n \n>>>>>>>>>>>\"\"\");\n if o == \"1\" or o == \"CREAT FREE\" or o == \"creat free\":\n os.system(\"clear\")\n p = input(Fore.YELLOW+\"\"\"\n ----------------------------------------------\n | GMAIL : \"\"\")\n j = input(Fore.YELLOW+\"\"\" |_____________________________________________\n | PASSWORD : \"\"\")\n print(\" |_____________________________________________\")\n ji = \" Thank YOU for create account FREE\"\n po =f\"\"\"\n ______________________contact__________________\n <<<<<<<<<<<<<| |>>>>>>>>>>>>\n GMAIL : {p} \n PASSWORD : {j} \n <<<<<<<<<<<<<|_______________________________________________|>>>>>>>>>>>\n \"\"\"\n if p == \"\":\n print (\" Gmail --false--\\n\")\n time.sleep(2)\n elif j == \"\":\n print (\" password --false--\\n\")\n time.sleep(2)\n else: \n print (Fore.GREEN+po)\n print (Fore.RED+ji)\n user.append(p)\n password.append(j)\n\n os.system(\"clear\")\n elif o == \"2\" or o == \"LOGIN\" or o == \"login\":\n os.system(\"clear\")\n s = input(\"\"\"\n ----------------------------------------------\n | GMAIL : \"\"\")\n l = input(\"\"\" |_____________________________________________\n | PASSWORD : \"\"\")\n print(\" |_____________________________________________\")\n if s in user and l in password:\n os.system(\"clear\")\n print(Fore.RED+\"ROBOT: hi\")\n lw = Fore.RED+\"ROBOT: OK MY FRIEND\"\n wl = Fore.RED+\"ROBOT : OK SIR\"\n while True:\n me = input(Fore.WHITE+\"me: \")\n speak.say(me)\n if me == \"open terminal\": \n print(lw)\n os.system(\"gnome-terminal\")\n speak.say(\"ok my friend\")\n elif me == \"open firefox\":\n print(lw)\n speak.say(\"ok my friend\")\n os.system(\"firefox\")\n elif me == \"hi\" or me == \"hello\":\n print(Fore.RED+\"ROBOT: DO YOU HELP ?\")\n speak.say(\"fo you help\")\n elif me == \"yes\" or me == \"y\":\n print(Fore.WHITE + \"\"\"ROBOT:\nFACEBOOK \nINSTGRAM\nYOUTUBE\nCLOCK\nHACKING\nGOOGLE\nterminal\nmy ip\nsend brupforce\ncreate file\n:\"\"\")\n speak.say(\"facebook instgram youtube clock hacking google terminal my i p address send burpforce create file\")\n elif me == \"MY IP\" or me == \"my ip\":\n print(lw)\n os.system(\"ifconfig\")\n speak.say(\"ok my friend\")\n elif me == \"facebook\" or me == \"FACEBOOK\":\n print (wl)\n os.system(\"xdg-open https://www.facebook.com\")\n speak.say(\"ok sir\")\n elif me == \"INSTAGRAM\" or me == \"instagram\":\n print (Fore.RED+\"ROBOT : OK OPEN IMSTAGRAM\")\n os.system(\"xdg-open https://www.instagram.com\")\n speak.say(\"ok sir\")\n elif me == \"YOUTUBE\" or me == \"youtube\":\n print (\"ROBOT : OK SIR\")\n speak.say(\"ok sir\")\n os.system(\"xdg-open https://www.youtube.com\")\n elif me == \"clock\":\n print (wl)\n speak.say(\"ok sir\")\n date = datetime.datetime.now()\n speak.say(date)\n print(date)\n elif me == \"hacking\":\n print (wl)\n speak.say(\"ok sir\")\n os.system(\"xdg-open https://www.blackhat.com\")\n elif me in e:\n speak.say(\"ok exit\")\n print(Fore.RED+\"ROBOT:EXIT 
NOw\")\n exit()\n elif me == \"create file\": \n print (Fore.RED+\"ROBOT : OK CREAT FILE\")\n speak.say(\"ok creat file thanl you \")\n speak.runAndWait()\n o = input(\"name file for creat:\")\n f = open(o,\"w\")\n f.write(\"Editor : Official-coDe\")\n f.close()\n elif me == \"terminal\": \n while True:\n print (lw)\n speak.say(\"ok my friend\")\n speak.runAndWait()\n i = input(\"command: \")\n speak.say(i)\n speak.runAndWait()\n os.system(i)\n if i == \"back\":\n break\n print(Fore.RED+\"ROBOT : OK BACK\")\n speak.say(\"back\")\n speak.runAndWait() \n elif me == \"google\":\n os.system(\"clear\")\n print(\"GooGlE\")\n while True:\n w = input(Fore.RED+\"\"\"\n>>>>>>>>search:\"\"\")\n if w == \"back\":\n break\n print(\"ROBOT : OK BACK\")\n speak.say(\"back\") \n speak.runAndWait()\n else:\n os.system(\"xdg-open https://www.google.com/search?q=\"+w) \n elif me == \"send burpforce\":\n print (Fore.RED+\"ROBOT : Brup force attack\")\n speak.say('brup force attack')\n speak.runAndWait()\n ui = int(input(\"time:\"))\n iu = input(\"text:\")\n while True:\n time.sleep(ui)\n pyautogui.typewrite(iu)\n pyautogui.press('enter')\n speak.runAndWait()\n else:\n print (\"Gmail or password false\\n\")\n time.sleep(2)\n elif o in e:\n exit()\n os.system(\"clear\")\n else :\n print (\"what is {}\".format(o))\n time.sleep(1)\n", "sub_path": "speak.py", "file_name": "speak.py", "file_ext": "py", "file_size_in_byte": 5890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pyttsx3.init", "line_number": 9, "usage_type": "call"}, {"api_name": "os.system", "line_number": 14, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 15, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 24, "usage_type": "name"}, {"api_name": "os.system", "line_number": 34, "usage_type": "call"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 35, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 35, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 38, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 56, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 56, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 57, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 57, "usage_type": "name"}, {"api_name": "os.system", "line_number": 61, "usage_type": "call"}, {"api_name": "os.system", "line_number": 63, "usage_type": "call"}, {"api_name": "os.system", "line_number": 71, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 72, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 72, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 73, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 73, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 74, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 74, "usage_type": "name"}, {"api_name": 
"colorama.Fore.WHITE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 76, "usage_type": "name"}, {"api_name": "os.system", "line_number": 80, "usage_type": "call"}, {"api_name": "os.system", "line_number": 85, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 87, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 87, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 90, "usage_type": "name"}, {"api_name": "os.system", "line_number": 105, "usage_type": "call"}, {"api_name": "os.system", "line_number": 109, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 112, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 112, "usage_type": "name"}, {"api_name": "os.system", "line_number": 113, "usage_type": "call"}, {"api_name": "os.system", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 128, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 131, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 131, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 134, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 134, "usage_type": "name"}, {"api_name": "os.system", "line_number": 149, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 152, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 152, "usage_type": "name"}, {"api_name": "os.system", "line_number": 156, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 159, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 159, "usage_type": "name"}, {"api_name": "os.system", "line_number": 167, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 169, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 169, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 175, "usage_type": "call"}, {"api_name": "pyautogui.typewrite", "line_number": 176, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 177, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 181, "usage_type": "call"}, {"api_name": "os.system", "line_number": 184, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 187, "usage_type": "call"}]}
+{"seq_id": "151402731", "text": "import csv # lib csv for excel\nimport serial # lib serial for read the port usb Arduino uno\nfrom datetime import datetime\nimport os\nimport datetime \nimport time\n\n\nser = serial.Serial(\"COM3\", 9600) # open serial port\nser.flushInput() # input data Arduino uno\ndef createfile():\n current_date_and_time = datetime.datetime.now().strftime('%Y-%m-%d')\n current_date_and_time_string = str(current_date_and_time)\n file_name = current_date_and_time_string + \".csv\"\n if not os.path.isfile(file_name):\n with open(\"file_name.csv\", \"a\", newline='') as f: # create and open the file csv\n writer = csv.writer(f, delimiter=\",\") # Split the string, using comma, followed by a space, as a separator\n writer.writerow(\n [\"date\", \"Time\", \"Thermostat\", \"Temperature\", \"Humidity\"]) # titles for columns file excel\n\n f.close() # close file test_file.csv\n else:\n pass\n\nwhile True:\n createfile()\n \n ser_bytes = ser.readline().decode().strip().split(',') # use split(',') to seperate ser_byte string to list\n new_ser_bytes = [float(i) for i in ser_bytes] # using list comprehension to perform conversion to float\n t = time.localtime() # time location\n decoded_time1 = time.strftime('%Y-%m-%d', t) # for date\n decoded_time2 = time.strftime('%H:%M:%S', t) # for time\n\n print(decoded_time1, decoded_time2, ser_bytes) # print date , time, and data Arduino uno\n with open(\"file_name.csv\", \"a\", newline='') as f: # create and open the file csv\n writer = csv.writer(f, delimiter=\",\") # Split the string, using comma, followed by a space, as a separator\n writer.writerow([decoded_time1, decoded_time2, ser_bytes[0], ser_bytes[1], ser_bytes[2]]) # writerow with seperate data, time , data1 and data2\n f.close() # close file test_file.csv\n # 1h for sleep\n time.sleep(3600)", "sub_path": "create_csv.py", "file_name": "create_csv.py", "file_ext": "py", "file_size_in_byte": 1884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "serial.Serial", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 17, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 30, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 32, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "499103376", "text": "from typing import List\n\nfrom utils.misc import UnionFind\n\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n if not grid or not grid[0]:\n return 0\n h, w = len(grid), len(grid[0])\n uf = UnionFind([(r, c) for r in range(h) for c in range(w) if grid[r][c] == '1'])\n for r in range(h):\n for c in range(w):\n if grid[r][c] == '1':\n for y, x in [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]:\n if 0 <= y < h and 0 <= x < w and grid[y][x] == '1':\n uf.union((r, c), (y, x))\n\n return len(uf.size)\n", "sub_path": "week10/number_of_islands.py", "file_name": "number_of_islands.py", "file_ext": "py", "file_size_in_byte": 654, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "utils.misc.UnionFind", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "89124690", "text": "from models.state import State\nfrom models.city import City\nfrom flask import Flask, render_template\nfrom models import storage\n\n\n# create a Flask application object\napp = Flask(__name__)\n\n\n@app.route('/states', strict_slashes=False)\n@app.route('/states/', strict_slashes=False)\ndef states_by_id(id=\"\"):\n states = storage.all(cls=State)\n cities = storage.all(cls=City)\n return render_template('9-states.html',\n states=states.values(),\n cities=cities.values(), id=id)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port='5000')\n", "sub_path": "web_flask/9-states.py", "file_name": "9-states.py", "file_ext": "py", "file_size_in_byte": 614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "models.storage.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 14, "usage_type": "name"}, {"api_name": "models.state.State", "line_number": 14, "usage_type": "name"}, {"api_name": "models.storage.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 15, "usage_type": "name"}, {"api_name": "models.city.City", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "421084687", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom tagging.registry import register\nfrom tagging.fields import TagField\nfrom tagging.registry import register\nfrom .search import NoteIndex\n\n\nclass EmailServices(models.Model):\n hash = models.CharField(max_length=32)\n status = models.BooleanField(default=False)\n user = models.OneToOneField(User, on_delete=models.CASCADE, unique=True, null=True)\n\n class Meta:\n indexes = [\n models.Index(fields=['hash'],)\n ]\n\n\nclass Note(models.Model):\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n pub_date = models.DateTimeField(auto_now_add=True)\n last_edited_date = models.DateTimeField(auto_now=True)\n subject = models.CharField(max_length=64)\n spec_code = models.CharField(max_length=64)\n short_content = models.TextField(default='')\n content = models.TextField(default='')\n tags = TagField()\n\n def __str__(self):\n return '[' + str(self.pub_date) + '] ' + str(self.spec_code) + ' - ' + str(self.subject)\n\n def indexing(self):\n obj = NoteIndex(\n meta={'id': self.id},\n subject=self.subject,\n posted_date=self.pub_date,\n edited_date=self.last_edited_date,\n spec_code=self.spec_code,\n short_content=self.short_content,\n full_content=self.content,\n )\n obj.save()\n return obj.to_dict(include_meta=True)\n\n class Meta:\n indexes = [\n models.Index(fields=['spec_code']),\n models.Index(fields=['pub_date']),\n models.Index(fields=['last_edited_date'])\n ]\n\n\nclass Comment(models.Model):\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n note = models.ForeignKey(Note, on_delete=models.CASCADE)\n pub_date = models.DateTimeField(auto_now_add=True)\n parent_id = models.IntegerField(default=-1)\n content = models.TextField(max_length=1024)\n\n class Meta:\n indexes = [\n models.Index(fields=['author']),\n models.Index(fields=['pub_date']),\n models.Index(fields=['parent_id']),\n ]\n\n\nclass Rate(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n note = models.ForeignKey(Note, on_delete=models.CASCADE)\n mark = models.IntegerField()\n\n class Meta:\n indexes = [\n models.Index(fields=['user']),\n models.Index(fields=['note']),\n ]\n unique_together = ('user', 'note'),\n\n\nclass Like(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n\n class Meta:\n indexes = [\n models.Index(fields=['user']),\n models.Index(fields=['comment']),\n ]\n unique_together = ('user', 'comment'),\n", "sub_path": "serana/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.db.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": 
"django.db.models.CASCADE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models.Index", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "tagging.fields.TagField", "line_number": 28, "usage_type": "call"}, {"api_name": "search.NoteIndex", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models.Index", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, 
"usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 70, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 83, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.db.models.Index", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}]}
+{"seq_id": "452669519", "text": "#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.keras.models import load_model\n\nimport datetime\nimport argparse\nimport os\nimport sys\nimport time\n\nsys.path.insert(0, '../src')\nimport dataset_manager as data_manager\nimport neural_network as nn\n\n\nTEST_PATH = \"../data/\"\nMODEL_PATH = '../models/'\nPLOT_PATH = 'plots/'\nMODEL_NAME = 'model.hdf5'\nLOG_NAME = \"signal_process.log\"\nHISTORY_NAME = \"history.png\"\nTHRESHOLD = 0.2\n\n\ndef get_directory_name():\n return 'model_{0:%Y-%m-%d_%H:%M:%S}/' \\\n .format(datetime.datetime.now())\n\n\ndef save_history(history, filename):\n plt.figure()\n plt.plot(history.history['mean_squared_error'])\n plt.plot(history.history['val_mean_squared_error'])\n plt.title('Model accuracy')\n plt.ylabel('Mean square error')\n plt.xlabel('Epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(filename)\n\n\ndef save_log(window_size, architecture, test_eval, precision, exec_time, filename):\n with open(filename, \"w\") as f:\n f.write(\"WINDOW_SIZE:{}\\n\".format(window_size))\n f.write(\"ARCHITECTURE:{}\\n\".format(architecture))\n f.write(\"EXEC_TIME:{}\\n\".format(exec_time))\n f.write(\"TEST_EVAL:{}\\n\".format(test_eval))\n f.write(\"PRECISION:{}\\n\".format(precision))\n\n\ndef save_results(results, plot, filename):\n if plot:\n os.makedirs(PLOT_PATH, exist_ok=True)\n\n with open(filename, \"w\") as f:\n for sequence, prediction, is_signal in results:\n if plot:\n plt.figure()\n plt.plot(range(len(prediction)), prediction)\n plt.savefig(PLOT_PATH + sequence.name + \"-fig.png\")\n\n f.write('{}:{}\\n'.format(sequence.name, is_signal))\n\n\ndef loading_screen(i, length):\n print(\"Processed: {}/{}\".format(i, length), file=sys.stderr)\n\n\ndef train_action(window_size, hidden_layers):\n print(\"Fetching data...\", file=sys.stderr)\n data = data_manager.fetch_protein_data(\"../data/training_data\")\n print(\"Done! Preparing for training...\", file=sys.stderr)\n input_data, signal_data = data_manager.prepare_dataset(data, window_size, loading_screen)\n print(\"Done! 
Train and test splitting...\", file=sys.stderr)\n train_input, test_input = data_manager.train_test_split(input_data, signal_data)\n print(\"Done\", file=sys.stderr)\n\n print(\"Building signal model\", file=sys.stderr)\n\n # Input layer\n architecture = [input_data[0].shape[0]]\n # Hidden layer\n architecture.extend(hidden_layers)\n # Output layer\n architecture.append(2)\n\n model = nn.build_model(architecture)\n\n directory = MODEL_PATH + get_directory_name()\n try:\n os.makedirs(directory)\n except:\n print(\"There was an error while creating model's sub-directory.\", file=sys.stderr)\n exit(1)\n\n start_time = time.time()\n history = nn.train_model(model, train_input[0], train_input[1],\n filename=directory + MODEL_NAME)\n exec_time = time.time() - start_time\n\n test_eval = model.evaluate(test_input[0], test_input[1])\n\n results, precision = test_sequences(data, model, window_size, eval=True)\n save_history(history, directory + HISTORY_NAME)\n save_log(window_size, architecture, test_eval, precision, exec_time, directory + LOG_NAME)\n print(\"Done.\", file=sys.stderr)\n\n\ndef predict_signal(result):\n index = None\n signals = result[:, 0]\n cleavage = result[:, 1]\n for i, c in enumerate(cleavage):\n if c > THRESHOLD:\n index = i\n break\n if not index: return False\n\n signals = signals[:index]\n maximum = len(signals)\n count = 0\n for s in signals:\n if s > THRESHOLD:\n count += 1\n\n return count / maximum > 0.9\n\n\ndef test_sequences(sequences, model, window_size, eval=False):\n results = []\n count = 0\n count_signal = 0\n for s in sequences:\n result = model.predict(data_manager.prepare_example(s, window_size))\n is_signal = predict_signal(result)\n results.append((s, result, is_signal))\n if eval:\n if (s.label == 1 and is_signal == True) or (s.label == -1 and is_signal == False): count += 1\n\n if is_signal: count_signal += 1\n print(\"{}:{}\".format(s.name, is_signal), file=sys.stderr)\n\n print(\"{}:{}/{}\".format(len(results), count_signal, len(results) - count_signal), file=sys.stderr)\n if not eval: return results\n else: return results, count/len(results)\n\n\ndef test_action(window_size, model_dir, test_file, output_file, plot):\n model = None\n try:\n model = load_model(MODEL_PATH + model_dir + '/' + MODEL_NAME)\n except:\n print(\"No such model! Please specify model with -m flag.\", file=sys.stderr)\n exit(1)\n\n sequences = []\n try:\n sequences = data_manager.get_file_sequences(TEST_PATH + test_file)\n except:\n print(\"No testing data, please put it in /data folder.\", file=sys.stderr)\n exit(1)\n\n try:\n results = test_sequences(sequences, model, window_size)\n\n\n except:\n print(\"Model was built with different window size.\", file=sys.stderr)\n exit(1)\n\n save_results(results, plot, output_file)\n\n\ndef evaluate_action(window_size, model_dir):\n data = data_manager.fetch_protein_data(\"../data/training_data\")\n results, precision = test_sequences(data, load_model(MODEL_PATH + model_dir + '/' + MODEL_NAME), window_size,\n eval=True)\n print(\"Precision:{}\".format(precision))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Peptide classifier program. Program requires to have /data and /models folders.\")\n parser.add_argument(\"--train\", action=\"store_true\",\n help=\"Use this to train model.\")\n parser.add_argument(\"--test\", action=\"store_true\",\n help=\"Use this to do tests with specific model. 
[-m, -w]\")\n parser.add_argument(\"--evaluate\", action=\"store_true\",\n help=\"Use this to do evaluate model with set it was trained on [-m, -w]\")\n parser.add_argument(\"-w\", \"--window_size\", type=int, default=21,\n help=\"Define used window size, default is 21.\")\n parser.add_argument(\"-p\", \"--plot\", action=\"store_true\",\n help=\"Use this to plot each sequence while testing.\")\n parser.add_argument(\"-m\", \"--model\",\n help=\"Define model's directory, used for testing.\")\n parser.add_argument(\"-o\", \"--output\", default=\"output\",\n help=\"Define output file name.\")\n parser.add_argument(\"-f\", \"--test_file\", default=\"test.fa\",\n help=\"Define test file name, stored in /data.\")\n parser.add_argument(\"-a\", \"--architecture\", nargs='*', default=[128, 64, 16],\n help=\"Define architecture of model. Provide only hidden layers, for example: -a 8 4 will yield\"\n \"architecture INPUTx8x4xOUTPUT.\")\n\n args = parser.parse_args()\n\n if args.train:\n train_action(args.window_size, args.architecture)\n\n if args.test:\n test_action(args.window_size, args.model, args.test_file, args.output, args.plot)\n\n if args.evaluate:\n evaluate_action(args.window_size, args.model)\n", "sub_path": "src/peptide_predictor.py", "file_name": "peptide_predictor.py", "file_ext": "py", "file_size_in_byte": 7269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, 
"usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 65, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 69, "usage_type": "attribute"}, {"api_name": "dataset_manager.fetch_protein_data", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 71, "usage_type": "attribute"}, {"api_name": "dataset_manager.prepare_dataset", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 73, "usage_type": "attribute"}, {"api_name": "dataset_manager.train_test_split", "line_number": 74, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 77, "usage_type": "attribute"}, {"api_name": "neural_network.build_model", "line_number": 86, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 92, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "neural_network.train_model", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 105, "usage_type": "attribute"}, {"api_name": "dataset_manager.prepare_example", "line_number": 133, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 150, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 152, "usage_type": "attribute"}, {"api_name": "dataset_manager.get_file_sequences", "line_number": 157, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 167, "usage_type": "attribute"}, {"api_name": "dataset_manager.fetch_protein_data", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 175, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 181, "usage_type": "call"}]}
+{"seq_id": "234746624", "text": "import numpy as np\nimport pdb\nfrom serial import Serial\nimport cv2\nimport time\nimport math\n\nfrom Image import *\nfrom Utils import *\n\nser = Serial('/dev/ttyACM0', 4800)\nfont = cv2.FONT_HERSHEY_SIMPLEX\ndirection = 0\nImages=[]\nN_SLICES = 4\n\nfor q in range(N_SLICES):\n Images.append(Image())\n\ncapture = cv2.VideoCapture(1) # read the video stream\n# capture.set(3, 320.0) # set the width\n# capture.set(4, 240.0) # set the height\n# capture.set(5, 15) # set the frame rate\ncv2.namedWindow('frame', cv2.WINDOW_FULLSCREEN)\n\nit = 1\n\nwhile cv2.waitKey(1) & 0xff != ord('q'):\n \n flag, img = capture.read()\n # img = cv2.imread('img4.jpg')\n direction = 0\n img = RemoveBackground(img, False)\n if img is not None:\n t1 = time.clock()\n SlicePart(img, Images, N_SLICES)\n for i in range(N_SLICES):\n direction += Images[i].dir\n \n #negative error: right of middle\n #positive error: left of middle\n error1 = Images[0].dir #\n error2 = Images[1].dir \n error3 = Images[2].dir\n error4 = Images[3].dir #error of furthest part of line\n \n slope = (Images[3].x_coord - Images[0].x_coord)/180.0\n theta = round(math.degrees(math.atan(slope)), 2)\n\n fm = RepackImages(Images)\n t2 = time.clock()\n cv2.putText(fm,\"Time: \" + str((t2-t1)*1000) + \" ms\", (10, 470), font, 0.5, (0,0,255), 1, cv2.LINE_AA)\n \n # for i in range(N_SLICES):\n # cv2.imshow(\"part %d\" % i, Images[i].image)\n # print('error1: ', error1)\n # print('error2: ', error2)\n # print('error3: ', error3)\n # print('error4: ', error4, end=\"\\n\\n\")\n\n cv2.imshow(\"frame\", fm)\n print('Slope: ', slope)\n print('Angle: ', theta)\n print('Iteration: ', it)\n\n # binary = \"{0:b}\".format(theta)\n \n ser.write(str.encode(str(theta) + \"\\n\"))\n it = it + 1\n\n # connection.sendall( bytes(str(direction).encode('utf8')) )\n\ncv2.destroyAllWindows()", "sub_path": "VisionFollowing/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "serial.Serial", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.WINDOW_FULLSCREEN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 35, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 48, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 48, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "359045058", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 15 02:27:42 2021\r\n\r\n@author: Kayserend\r\n\"\"\"\r\n# importing button widget from kivy framework\r\nfrom kivy.uix.button import Button\r\nfrom kivy.app import App\r\nfrom kivy.core.window import Window\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.image import Image\r\nfrom kivy.core.audio import SoundLoader\r\nfrom kivy.clock import Clock\r\nfrom kivy.uix.label import Label\r\nfrom random import *\r\nimport time\r\nfrom android.permissions import request_permissions, Permission\r\n\r\nrequest_permissions([Permission.READ_EXTERNAL_STORAGE])\r\n \r\n# this is the main class which \r\n# will render the whole application\r\n\r\n\r\nclass firstApp(App):\r\n \r\n # method which will render our application\r\n def closeApp(self):\r\n # closing application\r\n App.get_running_app().stop()\r\n # removing window\r\n Window.close()\r\n \r\n def playSound(self):\r\n sound=SoundLoader.load(self.text)\r\n if sound:\r\n sound.volume=0.5\r\n sound.play()\r\n \r\n def playSoundRandom(self):\r\n a=randint(1,100)\r\n if a>=2:\r\n sound=SoundLoader.load(\"bruh.ogg\")\r\n else:\r\n sound=SoundLoader.load(\"test_sound.ogg\")\r\n if sound:\r\n sound.volume=0.5\r\n sound.play()\r\n \r\n \r\n def build(self):\r\n layout=GridLayout(cols=3)\r\n fotito=Image(source=\"./kawaii.jpg\")\r\n fotito2=Image(source=\"./kawaii.jpg\")\r\n #primer boton\r\n #name=\"test_sound.mp3\"\r\n btnsound=Button(text ='what.ogg',color =(0,0,0,0),background_normal='what.jpg')\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n #sound=SoundLoader.load(\"test_sound2.mp3\");\r\n btnsound.bind(on_press=firstApp.playSound)\r\n layout.add_widget(btnsound)\r\n #segundo boton\r\n btnsound=Button(text ='horse.ogg',color =(0,0,0,0),background_normal='horseamazing.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #tercer boton\r\n btnsound=Button(text ='door.ogg',color =(0,0,0,0),background_normal='doory.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #cuarto boton\r\n btnsound=Button(text ='bear.ogg',color =(0,0,0,0),background_normal='oso.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #quinto boton\r\n btnsound=Button(text ='dog.ogg',color =(0,0,0,0),background_normal='perro.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #sexto boton\r\n btnsound=Button(text ='scream.ogg',color =(0,0,0,0),background_normal='aaah.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #septimo boton\r\n btnsound=Button(text ='explosion.ogg',color =(0,0,0,0),background_normal='boom.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #octavo boton\r\n btnsound=Button(text ='aguila.ogg',color =(0,0,0,0),background_normal='pajarito.jpg')\r\n btnsound.bind(on_press=firstApp.playSound)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n #noveno boton\r\n 
btnsound=Button(text ='NONE',color =(0,0,0,0),background_normal='bruh.jpg')\r\n btnsound.bind(on_press=firstApp.playSoundRandom)\r\n #btnsound.bind(on_press=firstApp.playSound(\"test_sound.mp3\"))\r\n layout.add_widget(btnsound)\r\n \r\n clock=IncrediblyCrudeClock()\r\n Clock.schedule_interval(clock.update, 1)\r\n layout.add_widget(fotito)\r\n #este boton cierra la app\r\n btn=Button(text=\"Bye world\")\r\n btn.bind(on_press=firstApp.closeApp)\r\n layout.add_widget(btn)\r\n layout.add_widget(clock)\r\n return layout\r\n \r\nclass IncrediblyCrudeClock(Label):\r\n\tdef update(self,*args):\r\n\t\tself.text=time.asctime()\r\n\r\n# running the application\r\nfirstApp().run()\r\n", "sub_path": "very_useful_soundboard/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "android.permissions.request_permissions", "line_number": 20, "usage_type": "call"}, {"api_name": "android.permissions.Permission.READ_EXTERNAL_STORAGE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "android.permissions.Permission", "line_number": 20, "usage_type": "name"}, {"api_name": "kivy.app.App", "line_number": 26, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 31, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 31, "usage_type": "name"}, {"api_name": "kivy.core.window.Window.close", "line_number": 33, "usage_type": "call"}, {"api_name": "kivy.core.window.Window", "line_number": 33, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 36, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 36, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 44, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 44, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 46, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 46, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 53, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 54, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 55, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 58, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 64, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 69, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 74, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 79, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 84, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 89, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 94, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 99, "usage_type": "call"}, {"api_name": "kivy.clock.Clock.schedule_interval", "line_number": 105, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 105, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 108, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 114, "usage_type": "name"}, {"api_name": "time.asctime", "line_number": 116, "usage_type": "call"}]}
+{"seq_id": "629948420", "text": "import math as m\nfrom copy import deepcopy\nfrom time import time\n\nimport cv2\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport gcransac as gc\nimport ransac as rc\nfrom utils_helper import *\n\n\n''' homogr\n'adam', 'boat', 'Boston', 'BostonLib', 'BruggeSquare', 'BruggeTower', 'Brussels', 'CapitalRegion', \n'city', 'Eiffel', 'ExtremeZoom', 'graf', 'LePoint1', 'LePoint2', 'LePoint3', 'WhiteBoard'\n'''\nif __name__ == \"__main__\":\n dataset = 'adam'\n src_img, dst_img, gt_M, vpts = load_homogr_datasets(dataset)\n\n # 创建 ORB 特征提取器\n detetor = cv2.ORB_create(2000)\n # 提取 ORB 角点特征点 keypoints,特征点提取区域局部图像 descriptions\n keypoints1, descriptions1 = detetor.detectAndCompute(src_img, None)\n keypoints2, descriptions2 = detetor.detectAndCompute(dst_img, None)\n\n # BF 暴力匹配器\n bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)\n matches = bf.match(descriptions1, descriptions2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # 根据匹配结果构建点对\n src_pts = np.float32([keypoints1[m.queryIdx].pt for m in matches]).reshape(-1, 2)\n dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in matches]).reshape(-1, 2)\n # 获取图像长宽信息 \n h1, w1, _ = np.shape(src_img)\n h2, w2, _ = np.shape(dst_img)\n \n # 输出初始获取的暴力匹配结果\n print(f\"Detect {dataset} features\")\n print(f\"Features found in src image = {len(keypoints1)}\")\n print(f\"Features found in dst image = {len(keypoints2)}\")\n print(f\"Matches number = {len(matches)}\", '\\n')\n\n threshold = 1.0\n match_img_list = []\n H, mask = None, None\n for i in range(2):\n if i == 0:\n print('RANSAC')\n H, mask = rc.findHomography(src_pts, dst_pts, threshold=threshold, conf=0.99, max_iters=10000)\n else:\n print('GC-RANSAC')\n H, mask = gc.findHomography(src_pts, dst_pts, h1, w1, h2, w2, threshold=threshold, conf=0.99, max_iters=10000)\n print('Inliers Number = ', deepcopy(mask).astype(np.float32).sum())\n print('Error = ', getReprojectionError(vpts, H), '\\n')\n", "sub_path": "test_3_iterations.py", "file_name": "test_3_iterations.py", "file_ext": "py", "file_size_in_byte": 2167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.ORB_create", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.BFMatcher", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.NORM_L2", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 35, "usage_type": "call"}, {"api_name": "math.queryIdx", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 36, "usage_type": "call"}, {"api_name": "math.trainIdx", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 39, "usage_type": "call"}, {"api_name": "ransac.findHomography", "line_number": 53, "usage_type": "call"}, {"api_name": "gcransac.findHomography", "line_number": 56, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 57, "usage_type": "attribute"}]}
+{"seq_id": "167784842", "text": "#!/usr/bin/python3\n# coding: utf-8\nimport numpy as np\nimport pickle\nimport sys\nfrom skimage import color\nfrom skimage import io\nfrom skimage.transform import resize\nfrom fbRun import fbRun\nimport numpy as np\nfrom computeTextons import computeTextons\nfrom pathlib import Path\nfrom sklearn.ensemble import RandomForestClassifier\nimport time\nfrom sklearn.metrics import confusion_matrix\nfrom assignTextons import assignTextons\nimport cifar10 as cf\n\n\ndef fileExists(path):\n return Path(path).exists()\n\n\ndef toPickle(obj, name):\n pickle.dump(obj, open(name+'.pkl', \"wb\"))\n\n\ndef loadPickle(name):\n return pickle.load(open(name, \"rb\"))\n\ndef histc(X, bins):\n import numpy as np\n map_to_bins = np.digitize(X, bins)\n r = np.zeros(bins.shape)\n for i in map_to_bins:\n r[i-1] += 1\n return np.array(r)\n\n#evaluating in test data\nk = 100\nn = 10\n\nbestAlgPath = './data/bestAlgo.pkl'\n\nclf = loadPickle(bestAlgPath)\n\ntestTextonMapPath = './data/testTextonMap.pkl'\nif not fileExists(testTextonMapPath):\n print('Loading test images')\n testImgs = loadPickle('./data/testFilterResponses.pkl')\n print('Loading textons')\n textonPath = './data/mapAndTexton'+str(k)+'.pkl'\n textons = loadPickle(textonPath)['textons']\n print('Asigning textons to test images')\n textonMap = assignTextons(testImgs,textons.transpose())\n toPickle(textonMap,'./data/testTextonMap')\nelse:\n textonMap = loadPickle(testTextonMapPath)\n\nprint('Loading test labels')\ntestLabels = cf.load_cifar10('./cifar-10-batches-py/',mode='test')['labels']\n\n\nnTest = len(testLabels)\nrfPred = []\nprint('Evaluating on test set')\nfor t in range(nTest):\n print('\\r {:.2f}%'.format((t+1)*100/nTest),end='')\n img = textonMap[:,t*32:(t+1)*32]\n histo = histc(img.flatten(), np.arange(k))\n rfPred.append(clf.predict([histo])[0])\n\ntestCM = confusion_matrix(testLabels,rfPred)\n\ntoPickle(testCM,'./data/testConfusionMatrix')\nprint()\nprint('Test confusion matrix:')\nprint(testCM)\n\n\n\n\n\n", "sub_path": "05-Textons/code/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pathlib.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.digitize", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "assignTextons.assignTextons", "line_number": 55, "usage_type": "call"}, {"api_name": "cifar10.load_cifar10", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "467190076", "text": "# -*- coding:utf-8 -*-\nimport configparser\n\nimport pymongo\nimport time\n\nfrom src import config\nimport logging\n\nlogging.basicConfig(level=config.LOGGING_LEVEL,\n format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\n\nclass HandleUserInDatabase(object):\n def __init__(self):\n cf = configparser.ConfigParser()\n cf.read('./database.ini', encoding='utf-8')\n address = cf.get('DATABASE', 'address')\n name = cf.get('DATABASE', 'name')\n myclient = pymongo.MongoClient(address)\n mydb = myclient[name]\n self.mycol = mydb[\"User\"]\n\n def save_data(self, uid, name):\n \"\"\"\n 保存uid\n :param uid:\n :return:\n \"\"\"\n # 检查是否有重复,这里调用前应该检查的,但是重复检查以防出错\n try:\n if len(self.mycol.find_one({\"uid\": uid})) > 0:\n logging.warning('repeat uid')\n return\n except:\n self.mycol.insert(\n {'uid': uid, 'name': name, 'followed_time': time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())})\n\n def find_data(self, uid):\n \"\"\"\n 查询是否有该uid\n :param uid:\n :return:\n \"\"\"\n if len(self.mycol.find_one({\"uid\": uid})) > 0:\n return True\n return False\n\n def get_total(self):\n \"\"\"\n 返回总共个数\n :return:\n \"\"\"\n return self.mycol.find().count()\n", "sub_path": "src/systemTools/HandleUserInDatabase.py", "file_name": "HandleUserInDatabase.py", "file_ext": "py", "file_size_in_byte": 1491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "src.config.LOGGING_LEVEL", "line_number": 10, "usage_type": "attribute"}, {"api_name": "src.config", "line_number": 10, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 16, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 33, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "108073961", "text": "import asyncio\nimport logging\nimport random\nimport collections\nimport itertools\nfrom datetime import datetime\nfrom discord.ext import commands, tasks\nfrom hourai.cogs import BaseCog\nfrom hourai.db.models import Username\nfrom sqlalchemy import func\nfrom sqlalchemy.exc import OperationalError\n\nMAX_STORED_USERNAMES = 20\n\n\nclass UsernameLogging(BaseCog):\n \"\"\" Cog for logging username changes. \"\"\"\n\n def __init__(self, bot):\n super().__init__()\n self.bot = bot\n # self.cleanup_username_histories.start()\n self.pending_ids = None\n self.offset = 0\n\n def cog_unload(self):\n # self.cleanup_username_histories.cancel()\n pass\n\n @commands.Cog.listener()\n async def on_user_update(self, before, after):\n if before.name == after.name:\n return\n assert before.id == after.id\n self.bot.loop.create_task(self.log_username_change(after))\n\n @commands.Cog.listener()\n async def on_message(self, msg):\n if msg.webhook_id is not None:\n return\n self.bot.loop.create_task(self.log_username_change(msg.author))\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n self.bot.loop.create_task(self.log_username_change(member))\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n self.bot.loop.create_task(self.log_username_change(member))\n\n @commands.Cog.listener()\n async def on_member_ban(self, guild, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @commands.Cog.listener()\n async def on_member_unban(self, guild, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @commands.Cog.listener()\n async def on_group_join(self, group, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @commands.Cog.listener()\n async def on_group_remove(self, group, user):\n self.bot.loop.create_task(self.log_username_change(user))\n\n @tasks.loop(seconds=0.1)\n async def cleanup_username_histories(self):\n frame_size = 5000\n try:\n with self.bot.create_storage_session() as session:\n ids = session.query(Username.user_id) \\\n .group_by(Username.user_id) \\\n .offset(self.offset).limit(frame_size)\n\n ids = [x[0] for x in ids]\n\n if len(ids) <= 0:\n self.offset = 0\n return\n\n keys = lambda u: u.user_id\n\n usernames = session.query(Username) \\\n .filter(Username.user_id.in_(ids)) \\\n .all()\n usernames = list(usernames)\n usernames.sort(key=keys)\n for user_id, names in itertools.groupby(usernames, key=keys):\n self.merge_names(names, session)\n\n if len(session.deleted) > 0:\n self.log_changes(session)\n session.commit()\n else:\n self.offset += frame_size\n except Exception:\n self.bot.logger.exception('Exception while clearing histories:')\n\n @cleanup_username_histories.before_loop\n async def before_cleanup_username_histories(self):\n await self.bot.wait_until_ready()\n\n @commands.command()\n @commands.is_owner()\n async def refresh(self, ctx):\n async with ctx.typing():\n await asyncio.gather(*[self.log_username_change(user)\n for user in ctx.bot.users])\n await ctx.send(':thumbsup:')\n\n async def log_username_change(self, user):\n # Don't log system or webhook accounts\n if int(user.discriminator) == 0:\n return\n\n timestamp = datetime.utcnow()\n\n def create_username():\n return Username(user_id=user.id, name=user.name,\n timestamp=timestamp,\n discriminator=user.discriminator)\n logged = False\n backoff = 1\n while not logged:\n try:\n with self.bot.create_storage_session() as session:\n usernames = session.query(Username) \\\n .filter_by(user_id=user.id) 
\\\n .order_by(Username.timestamp.desc())\n usernames = list(usernames)\n if any(n.name == user.name for n in usernames):\n return\n username = create_username()\n usernames.append(username)\n filtered = self.merge_names(usernames, session)\n if username in filtered:\n session.add(username)\n self.log_changes(session)\n session.commit()\n logged = True\n except OperationalError:\n msg = f'OperationalError: Retrying in {backoff} seconds.'\n logging.error(msg)\n delta = (random.random() - 0.5) / 5\n await asyncio.sleep(backoff * (1 + delta))\n backoff *= 2\n if backoff >= 10:\n raise\n\n def log_changes(self, session):\n if len(session.deleted) > 0:\n output = '\\n'.join(f'Deleting: {str(n)}'\n for n in session.deleted)\n self.bot.logger.info(output)\n if len(session.dirty) > 0:\n output = '\\n'.join(f'Updating: {str(n)}'\n for n in session.dirty)\n self.bot.logger.info(output)\n if len(session.new) > 0:\n output = '\\n'.join(f'Adding: {str(n)}'\n for n in session.new)\n self.bot.logger.info(output)\n\n def merge_names(self, names, session):\n names = list(names)\n if len(names) <= 1:\n return names\n names.sort(key=lambda u: u.timestamp, reverse=True)\n changed = True\n removal = set()\n while changed:\n removal.clear()\n for i, after in enumerate(names[:-1]):\n for j, before in enumerate(names[i+1:]):\n if before.name != after.name or before is after:\n continue\n before.discriminator = (before.discriminator or\n after.discriminator)\n session.add(before)\n try:\n session.delete(after)\n except Exception:\n pass\n removal.add(i)\n break\n changed = len(removal) > 0\n names = [u for idx, u in enumerate(names)\n if idx not in removal]\n if len(names) > MAX_STORED_USERNAMES:\n # Assumes the ordering is maintained\n for name in names[MAX_STORED_USERNAMES:]:\n session.delete(name)\n names = names[:MAX_STORED_USERNAMES]\n return names\n", "sub_path": "hourai/extensions/logging/username_logging.py", "file_name": "username_logging.py", "file_ext": "py", "file_size_in_byte": 7080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "hourai.cogs.BaseCog", "line_number": 16, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 30, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 37, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 43, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 43, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 47, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 47, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 47, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 51, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 51, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 51, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 
55, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 55, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 55, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 59, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 59, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 59, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 63, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 63, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 63, "usage_type": "name"}, {"api_name": "hourai.db.models.Username.user_id", "line_number": 72, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 72, "usage_type": "name"}, {"api_name": "hourai.db.models.Username.user_id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 73, "usage_type": "name"}, {"api_name": "hourai.db.models.Username", "line_number": 84, "usage_type": "argument"}, {"api_name": "hourai.db.models.Username.user_id.in_", "line_number": 85, "usage_type": "call"}, {"api_name": "hourai.db.models.Username.user_id", "line_number": 85, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 85, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 89, "usage_type": "call"}, {"api_name": "discord.ext.tasks.loop", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.ext.tasks", "line_number": 67, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 108, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 104, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 104, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 105, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 117, "usage_type": "name"}, {"api_name": "hourai.db.models.Username", "line_number": 120, "usage_type": "call"}, {"api_name": "hourai.db.models.Username", "line_number": 128, "usage_type": "argument"}, {"api_name": "hourai.db.models.Username.timestamp.desc", "line_number": 130, "usage_type": "call"}, {"api_name": "hourai.db.models.Username.timestamp", "line_number": 130, "usage_type": "attribute"}, {"api_name": "hourai.db.models.Username", "line_number": 130, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 142, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 144, "usage_type": "call"}, {"api_name": "random.random", "line_number": 145, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 146, "usage_type": "call"}]}
+{"seq_id": "16773158", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\nfrom .models import items\nfrom .forms import itemForm, UserCreateForm, AuthenticateForm\nfrom django.shortcuts import redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\ndef signup(request):\n if request.method == \"POST\":\n form = UserCreateForm(request.POST)\n if form.is_valid():\n new_user = User.objects.create_user(**form.cleaned_data)\n login(new_user)\n # redirect, or however you want to get to the main view\n return HttpResponseRedirect('item_new.html')\n else:\n form = UserCreateForm()\n\n return render(request, 'signup.html', {'form': form})\ndef index(request, auth_form=None, user_form=None):\n # User is logged in\n if request.user.is_authenticated():\n item = items.objects.all()\n user = request.user\n return render(request,\n 'toDoApp/item_list.html',\n {'item': item, })\n else:\n # User is not logged in\n auth_form = auth_form or AuthenticateForm()\n user_form = user_form or UserCreateForm()\n\n return render(request,\n 'signup.html',\n {'auth_form': auth_form, 'user_form': user_form, })\n\ndef item_list(request):\n item = items.objects.all()\n return render(request, 'toDoApp/item_list.html',{'item':item})\n\ndef item_detail(request, pk):\n items.objects.get(pk=pk)\n item = get_object_or_404(items, pk=pk)\n return render(request, 'toDoApp/item_detail.html', {'item':item})\n\ndef item_new(request):\n if request.method == \"POST\":\n form = itemForm(request.POST)\n if form.is_valid():\n item = form.save(commit=False)\n item.save()\n return redirect('item_detail', pk=item.pk)\n else:\n form = itemForm()\n return render(request, 'toDoApp/item_edit.html', {'form':form})\n\ndef item_edit(request, pk):\n item = get_object_or_404(items, pk=pk)\n if request.method==\"POST\":\n form = itemForm(request.items, instance=item)\n if form.is_valid():\n item = form.save(commit=False)\n item.save()\n return redirect('item_detail', pk=item.pk)\n else:\n form = itemForm(instance=item)\n return render(request, 'toDoApp/item_edit.html',{'form':form})\n\ndef login_view(request):\n if request.methon==\"POST\":\n form = AuthenticateForm(data=request.POST)\n if form.is_valid():\n login(request, form.get_user())\n #successful\n return redirect('/')\n else:\n #failed\n return index(request, auth_form=form)\n return redirect('/')\n\ndef logout_view(request):\n logout(request)\n return redirect ('/')\n", "sub_path": "toDoApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "forms.UserCreateForm", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.UserCreateForm", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "models.items.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "models.items.objects", "line_number": 26, "usage_type": 
"attribute"}, {"api_name": "models.items", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "forms.AuthenticateForm", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.UserCreateForm", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "models.items.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "models.items.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.items", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "models.items.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "models.items.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.items", "line_number": 45, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 46, "usage_type": "call"}, {"api_name": "models.items", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "forms.itemForm", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "forms.itemForm", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "models.items", "line_number": 61, "usage_type": "argument"}, {"api_name": "forms.itemForm", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "forms.itemForm", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "forms.AuthenticateForm", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 85, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 86, "usage_type": "call"}]}
+{"seq_id": "206256201", "text": "from selenium import webdriver\nfrom models.addressbook_app import AddressBookApp\ndriver = webdriver.Chrome()\nwd = AddressBookApp(driver, \"http://localhost/addressbook/\")\nwd.login('admin', 'secret')\nwd.open_group_page()\nt = driver.find_elements_by_name(\"selected[]\")\nprint(t[1].text)\nprint(t[1].tag_name)\nprint(t[1].id)\nprint(t[2].get_attribute(\"value\"))\nwd.quit()", "sub_path": "test_script.py", "file_name": "test_script.py", "file_ext": "py", "file_size_in_byte": 363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 3, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 3, "usage_type": "name"}, {"api_name": "models.addressbook_app.AddressBookApp", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "194326552", "text": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport time\nimport logging\n\nimport Chain\n\nimport BCDataStream\nimport deserialize\nimport util\nimport base58\nimport db\nfrom exception import DBException\n\nLOG = logging.getLogger('DataStore')\n\n\nWORK_BITS = 304 # XXX more than necessary.\n\nNULL_PUBKEY_HASH = \"\\0\" * Chain.PUBKEY_HASH_LENGTH\nNULL_PUBKEY_ID = 0\nPUBKEY_ID_NETWORK_FEE = NULL_PUBKEY_ID\n\n# Size of the script and pubkey columns in bytes.\nMAX_SCRIPT = 1000000\nMAX_PUBKEY = 65\n\nSCRIPT_ADDRESS_RE = re.compile(\"\\x76\\xa9\\x14(.{20})\\x88\\xac\\x61?\\\\Z\", re.DOTALL)\nSCRIPT_PUBKEY_RE = re.compile(\n \".((?<=\\x41)(?:.{65})|(?<=\\x21)(?:.{33}))\\xac\\\\Z\", re.DOTALL)\nSCRIPT_NETWORK_FEE = '\\x6a'\n\nclass DataStore(object):\n\n def __init__(store, bytes):\n store.db = db\n store.commit_bytes = bytes #default bytes\n store.bytes_since_commit = 0\n store._blocks = {}\n store.init_binfuncs()\n store.init_chains()\n\n def init_chains(store):\n \n store.chains_by = lambda: 0\n store.chains_by.id = {}\n store.chains_by.name = {}\n store.chains_by.magic = {}\n \n chains = store.db.chain_get_all()\n \n for chain_id, magic, chain_name, chain_code3, address_version, script_addr_vers, \\\n chain_policy, chain_decimals in chains:\n \n chain = Chain.create(\n id = int(chain_id),\n magic = store.binout(magic),\n name = unicode(chain_name),\n code3 = chain_code3 and unicode(chain_code3),\n address_version = store.binout(address_version),\n script_addr_vers = store.binout(script_addr_vers),\n policy = unicode(chain_policy),\n decimals = None if chain_decimals is None else int(chain_decimals))\n\n store.chains_by.id[chain.id] = chain\n store.chains_by.name[chain.name] = chain\n store.chains_by.magic[bytes(chain.magic)] = chain\n \n \n def import_block(store, b, chain=None):\n\n tx_hash_array = []\n all_txins_linked = True\n \n b['value_in'] = 0\n b['value_out'] = 0\n b['value_destroyed'] = 0\n\n\n # 写入交易数据 获得value_in, value_out, value_destroyed\n for pos in xrange(len(b['transactions'])):\n tx = b['transactions'][pos]\n\n if 'hash' not in tx:\n tx['hash'] = chain.transaction_hash(tx['__data__'])\n\n tx_hash_array.append(tx['hash'])\n tx['tx_id'] = store.db.tx_find_id_and_value(tx, pos == 0)\n\n if tx['tx_id']:\n all_txins_linked = False\n else:\n tx['tx_id'] = store.import_tx(tx, pos == 0, chain)\n if tx.get('unlinked_count', 1) > 0:\n all_txins_linked = False\n\n if tx['value_in'] is None:\n b['value_in'] = None\n elif b['value_in'] is not None:\n b['value_in'] += tx['value_in']\n b['value_out'] += tx['value_out']\n b['value_destroyed'] += tx['value_destroyed']\n\n\n # block 表中写入数据\n block_id = int(store.new_id(\"block\"))\n b['block_id'] = block_id\n\n if b['hashMerkleRoot'] != chain.merkle_root(tx_hash_array):\n raise MerkleRootMismatch(b['hash'], tx_hash_array)\n\n #寻找父节点\n hashPrev = b['hashPrev']\n is_genesis = hashPrev == chain.genesis_hash_prev\n\n (prev_block_id, prev_height, prev_work, prev_satoshis,\n prev_seconds, prev_ss, prev_total_ss, prev_nTime) = (\n (None, -1, 0, 0, 0, 0, 0, b['nTime'])\n if is_genesis else\n store.db.find_prev(store.hashin(hashPrev)))\n\n b['prev_block_id'] = prev_block_id\n b['height'] = None if prev_height is None else prev_height + 1\n b['chain_work'] = util.calculate_work(store.binout_int(prev_work), b['nBits'])\n\n b['seconds'] = None if prev_seconds is None else (prev_seconds+b['nTime']-prev_nTime)\n \n if prev_satoshis is None or prev_satoshis < 0 or b['value_in'] is None:\n 
b['satoshis'] = -1-b['value_destroyed']\n        else:\n            b['satoshis'] = prev_satoshis+b['value_out']-b['value_in']-b['value_destroyed']\n\n        if prev_satoshis is None or prev_satoshis < 0:\n            ss_created = None\n            b['total_ss'] = None\n        else:\n            ss_created = prev_satoshis * (b['nTime'] - prev_nTime)\n            b['total_ss'] = prev_total_ss + ss_created\n\n        if b['height'] is None or b['height'] < 2:\n            b['search_block_id'] = None\n        else:\n            b['search_block_id'] = store.get_block_id_at_height(\n                util.get_search_height(int(b['height'])),\n                None if prev_block_id is None else int(prev_block_id))\n\n        # Insert the block table row.\n        try:\n            bk = {\"block_id\" : block_id,\n                  \"height\": b['height'],\n                  \"bhash\": store.hashin(b['hash']),\n                  \"ntime\": store.intin(b['nTime']),\n                  \"mroot\": store.hashin(b['hashMerkleRoot']),\n                  \"version\": store.intin(b['version']),\n                  \"height\": b['height'],\n                  \"prev_block_id\": prev_block_id,\n                  \"chain_work\": store.binin_int(b['chain_work'], WORK_BITS),\n                  \"nbits\": store.intin(b['nBits']),\n                  \"nonce\": store.intin(b['nNonce']),\n                  \"value_in\": store.intin(b['value_in']),\n                  \"value_out\": store.intin(b['value_out']),\n                  \"satoshis\": store.intin(b['satoshis']),\n                  \"seconds\": store.intin(b['seconds']),\n                  \"total_ss\": store.intin(b['total_ss']),\n                  \"txs\": len(b['transactions']), \n                  \"search_id\": b['search_block_id']\n                  }\n            store.db.insert_block(bk)\n            \n        except DBException:\n            # Propagate database errors\n            raise\n\n        # Insert rows into the block_tx table\n        for tx_pos in xrange(len(b['transactions'])):\n            tx = b['transactions'][tx_pos]\n            store.db.insert_block_tx(block_id, tx, tx_pos)\n            LOG.info(\"block_tx %d %d\", block_id, tx['tx_id'])\n\n\n        # Fill in the remaining block and block_txin fields\n        if b['height'] is not None:\n            store.populate_block_txin(block_id)\n            if all_txins_linked or store.db.get_unlinked_txins(block_id) <= 0:\n                b['ss_destroyed'] = store.db.get_block_ss_destroyed(\n                    block_id, b['nTime'],\n                    map(lambda tx: tx['tx_id'], b['transactions']))\n                if ss_created is None or prev_ss is None:\n                    b['ss'] = None\n                else:\n                    b['ss'] = prev_ss + ss_created - b['ss_destroyed']\n                store.db.update_new_block(store.intin(b['ss']),store.intin(b['ss_destroyed']),block_id) \n            else:\n                b['ss_destroyed'] = None\n                b['ss'] = None\n\n        \n        # Insert into block_next, or record an orphan block\n        if prev_block_id:\n            store.db.insert_block_next(prev_block_id, block_id)\n        elif not is_genesis:\n            store.db.insert_orphan_block(block_id, store.hashin(b['hashPrev']))\n\n        for row in store.db.get_orphan_block_id(store.hashin(b['hash'])):\n            (orphan_id,) = row\n            store.db.update_prev_block_id(block_id, orphan_id)\n            store.db.insert_block_next(block_id, orphan_id)\n            store.db.delete_orphan_block(orphan_id)\n        \n        # Handle orphan blocks\n        store.offer_block_to_chains(b, [chain.id])\n        return block_id\n\n\n    def import_tx(store, tx, is_coinbase, chain):\n        \n        tx_id = store.new_id(\"tx\")\n        dbhash = store.hashin(tx['hash'])\n        version = store.intin(tx['version'])\n        locktime = store.intin(tx['lockTime'])\n        \n        if 'size' not in tx:\n            tx['size'] = len(tx['__data__'])\n\n        store.db.insert_tx(tx_id, dbhash, version, locktime, tx['size']) \n        \n        # Import the transaction outputs.\n        tx['value_out'] = 0\n        tx['value_destroyed'] = 0\n        for pos in xrange(len(tx['txOut'])):\n            txout = tx['txOut'][pos]\n            tx['value_out'] += txout['value']\n            txout_id = store.new_id(\"txout\")\n\n            pubkey_id = store.script_to_pubkey_id(chain, txout['scriptPubKey'])\n            if pubkey_id is not None and pubkey_id <= 0:\n                tx['value_destroyed'] += txout['value']\n\n\n            txout_value = store.intin(txout['value'])\n            scriptPubkey = store.binin(txout['scriptPubKey'])\n            store.db.insert_txout(txout_id, tx_id, pos, 
txout_value, scriptPubkey, pubkey_id)\n            \n            for row in store.db.get_txin_by_txout_pos(dbhash, pos): \n                (txin_id,) = row\n                store.db.update_txin(txout_id, txin_id)\n                store.db.delete_unlinked_txin(txin_id)\n\n        # Import the transaction inputs.\n        tx['value_in'] = 0\n        tx['unlinked_count'] = 0\n        for pos in xrange(len(tx['txIn'])):\n            txin = tx['txIn'][pos]\n            txin_id = store.new_id(\"txin\")\n\n            if is_coinbase:\n                txout_id = None\n            else:\n                prevout_hash = store.hashin(txin['prevout_hash'])\n                txout_id, value = store.db.lookup_txout(prevout_hash, txin['prevout_n'])\n                if value is None:\n                    tx['value_in'] = None\n                elif tx['value_in'] is not None:\n                    tx['value_in'] += value\n\n\n            sequence = store.intin(txin['sequence'])\n            scriptSig = store.binin(txin['scriptSig']) \n            store.db.insert_txin(txin_id, tx_id, pos, txout_id, scriptSig, sequence)\n            \n            if not is_coinbase and txout_id is None:\n                tx['unlinked_count'] += 1\n                prev_hash = store.hashin(txin['prevout_hash'])\n                prev_n = store.intin(txin['prevout_n'])\n                store.db.insert_unlinked_txin(txin_id, prev_hash, prev_n)\n        \n        return tx_id\n\n    def offer_block_to_chains(store, b, chain_ids):\n        b['top'] = store.adopt_orphans(b, 0, chain_ids, chain_ids)\n        for chain_id in chain_ids:\n            store._offer_block_to_chain(b, chain_id)\n    \n    def populate_block_txin(store, block_id):\n        \n        rows = store.db.get_block_txin(block_id) \n        for row in rows:\n            (txin_id, oblock_id) = row\n            if store.is_descended_from(block_id, int(oblock_id)):\n                store.db.insert_block_txin(block_id, txin_id, oblock_id)\n    \n    \n    def is_descended_from(store, block_id, ancestor_id):\n        block = store.load_block(block_id)\n        ancestor = store.load_block(ancestor_id)\n        height = ancestor['height']\n        return block['height'] >= height and \\\n            store.get_block_id_at_height(height, block_id) == ancestor_id\n    \n    \n    def script_to_pubkey_id(store, chain, script):\n        script_type, data = chain.parse_txout_script(script)\n\n        if script_type in (Chain.SCRIPT_TYPE_ADDRESS, Chain.SCRIPT_TYPE_P2SH):\n            return store.pubkey_hash_to_id(data)\n\n        if script_type == Chain.SCRIPT_TYPE_PUBKEY:\n            return store.pubkey_to_id(chain, data)\n\n        if script_type == Chain.SCRIPT_TYPE_MULTISIG:\n            script_hash = chain.script_hash(script)\n            multisig_id = store._pubkey_id(script_hash, script)\n\n            if not store.selectrow(\"SELECT 1 FROM multisig_pubkey WHERE multisig_id = ?\", (multisig_id,)):\n                for pubkey in set(data['pubkeys']):\n                    pubkey_id = store.pubkey_to_id(chain, pubkey)\n                    store.sql(\"\"\"\n                        INSERT INTO multisig_pubkey (multisig_id, pubkey_id)\n                        VALUES (?, ?)\"\"\", (multisig_id, pubkey_id))\n            return multisig_id\n\n        if script_type == Chain.SCRIPT_TYPE_BURN:\n            return PUBKEY_ID_NETWORK_FEE\n\n        return None\n\n    def pubkey_hash_to_id(store, pubkey_hash):\n        return store._pubkey_id(pubkey_hash, None)\n    \n    def pubkey_to_id(store, chain, pubkey):\n        pubkey_hash = chain.pubkey_hash(pubkey)\n        return store._pubkey_id(pubkey_hash, pubkey)\n\n    \n    def _pubkey_id(store, pubkey_hash, pubkey):\n        dbhash = store.binin(pubkey_hash)\n        pubkey_id = store.db.get_pubkey_id(dbhash)\n        if pubkey_id:\n            return pubkey_id\n        else:\n            pubkey_id = store.new_id(\"pubkey\")\n            if pubkey is not None and len(pubkey) > MAX_PUBKEY:\n                pubkey = None\n            store.db.insert_pubkey(pubkey_id, dbhash, store.binin(pubkey))\n            return pubkey_id\n    \n    \n    \n    def adopt_orphans(store, b, orphan_work, chain_ids, chain_mask):\n\n        ret = [None]\n        def receive(x):\n            ret[0] = x\n        def doit():\n            store._adopt_orphans_1(stack)\n        stack = [receive, chain_mask, chain_ids, orphan_work, b, doit]\n        while stack:\n            stack.pop()()\n        return ret[0]\n    \n    def 
_adopt_orphans_1(store, stack):\n        def doit():\n            store._adopt_orphans_1(stack)\n        def continuation(x):\n            store._adopt_orphans_2(stack, x)\n        def didit():\n            ret = stack.pop()\n            stack.pop()(ret)\n\n        b = stack.pop()\n        orphan_work = stack.pop()\n        chain_ids = stack.pop()\n        chain_mask = stack.pop()\n        ret = {}\n        stack += [ ret, didit ]\n\n        block_id = b['block_id']\n        # Height of the next block\n        height = None if b['height'] is None else int(b['height'] + 1)\n\n\n        for chain_id in chain_ids: ret[chain_id] = (b, orphan_work)  # seed each chain with this block\n        \n        # Walk each block that builds on this one\n        for row in store.db.get_next_block(block_id):\n            \n            next_id, nBits, value_out, value_in, nTime, satoshis = row\n            nBits = int(nBits)\n            nTime = int(nTime)\n            satoshis = None if satoshis is None else int(satoshis)\n            new_work = util.calculate_work(orphan_work, nBits)\n\n            if b['chain_work'] is None:\n                chain_work = None\n            else:\n                chain_work = b['chain_work'] + new_work - orphan_work\n\n            if value_in is None:\n                value, count1, count2 = store.db.get_block_tx_info(next_id)\n                if count1 == count2 + 1:\n                    value_in = int(value)\n                else:\n                    LOG.debug(\n                        \"not updating block %d value_in: %s != %s + 1\",\n                        next_id, repr(count1), repr(count2))\n            else:\n                value_in = int(value_in)\n            generated = None if value_in is None else int(value_out - value_in)\n\n            if b['seconds'] is None:\n                seconds = None\n                total_ss = None\n            else:\n                new_seconds = nTime - b['nTime']\n                seconds = b['seconds'] + new_seconds\n                if b['total_ss'] is None or b['satoshis'] is None:\n                    total_ss = None\n                else:\n                    total_ss = b['total_ss'] + new_seconds * b['satoshis']\n\n            if satoshis < 0 and b['satoshis'] is not None and \\\n                b['satoshis'] >= 0 and generated is not None:\n                satoshis += 1 + b['satoshis'] + generated\n\n            if height is None or height < 2:\n                search_block_id = None\n            else:\n                search_block_id = store.get_block_id_at_height(util.get_search_height(height), int(block_id))\n\n            \n            store.db.update_block(height, store.binin_int(chain_work, WORK_BITS),\n                                  store.intin(value_in),\n                                  store.intin(seconds), store.intin(satoshis),\n                                  store.intin(total_ss), search_block_id,\n                                  next_id)\n\n            ss = None\n\n            if height is not None:\n                \n                store.db.update_candidate(height, next_id)\n                store.populate_block_txin(int(next_id))\n\n                if b['ss'] is None or store.db.get_unlinked_txins(next_id):\n                    pass\n                else:\n                    tx_ids = map(lambda row: row[0], store.db.get_block_txids(next_id))\n                    destroyed = store.db.get_block_ss_destroyed(next_id, nTime, tx_ids)\n                    ss = b['ss'] + b['satoshis'] * (nTime - b['nTime']) - destroyed\n                    \n                    store.db.update_block_ss(store.intin(ss),store.intin(destroyed),next_id)\n            \n            nb = {\n                \"block_id\": next_id,\n                \"height\": height,\n                \"chain_work\": chain_work,\n                \"nTime\": nTime,\n                \"seconds\": seconds,\n                \"satoshis\": satoshis,\n                \"total_ss\": total_ss,\n                \"ss\": ss}\n\n            stack += [ret, continuation,\n                      chain_mask, None, new_work, nb, doit]\n    \n    \n    def _adopt_orphans_2(store, stack, next_ret):\n        ret = stack.pop()\n        for chain_id in ret.keys():\n            pair = next_ret[chain_id]\n            if pair and pair[1] > ret[chain_id][1]:\n                ret[chain_id] = pair\n\n\n    def _offer_block_to_chain(store, b, chain_id):\n        if b['chain_work'] is None:\n            in_longest = 0\n        else:\n            top = b['top'][chain_id][0]\n            row = store.db.get_block_by_chain(chain_id)\n            \n            if row:\n                loser_id, loser_height, loser_work = row\n                if loser_id != top['block_id'] and \\\n                    store.binout_int(loser_work) >= top['chain_work']:\n                    row = None\n            if row:\n                # New longest chain.\n                in_longest = 1\n                to_connect = []\n                to_disconnect = []\n                winner_id = top['block_id']\n                winner_height = top['height']\n                while loser_height > winner_height:\n                    to_disconnect.insert(0, loser_id)\n
                    loser_id = store.db.get_prev_block_id(loser_id)\n                    loser_height -= 1\n                while winner_height > loser_height:\n                    to_connect.insert(0, winner_id)\n                    winner_id = store.db.get_prev_block_id(winner_id)\n                    winner_height -= 1\n                loser_height = None\n                while loser_id != winner_id:\n                    to_disconnect.insert(0, loser_id)\n                    loser_id = store.db.get_prev_block_id(loser_id)\n                    to_connect.insert(0, winner_id)\n                    winner_id = store.db.get_prev_block_id(winner_id)\n                    winner_height -= 1\n                for block_id in to_disconnect:\n                    store.db.disconnect_block(block_id, chain_id)\n                for block_id in to_connect:\n                    store.db.connect_block(block_id, chain_id)\n\n            elif b['hashPrev'] == store.chains_by.id[chain_id].genesis_hash_prev:\n                in_longest = 1 # Assume only one genesis block per chain. XXX\n            else:\n                in_longest = 0\n\n        store.db.insert_candidate(chain_id, b,in_longest)\n        if in_longest > 0:\n            store.db.update_chain(top['block_id'], chain_id)\n\n\n\n    def offer_existing_block(store, hash, chain_id):\n        block_row = store.db.get_block_by_hash(store.hashin(hash))\n        if not block_row:\n            return False\n        \n        b = {\n            \"block_id\": block_row[0],\n            \"height\": block_row[1],\n            \"chain_work\": store.binout_int(block_row[2]),\n            \"nTime\": block_row[3],\n            \"seconds\": block_row[4],\n            \"satoshis\": block_row[5],\n            \"ss\": block_row[6],\n            \"total_ss\": block_row[7]}\n\n        if store.db.exist_candidate(b['block_id'], chain_id):\n            LOG.info(\"block %d already in chain %d\", b['block_id'], chain_id)\n        else:\n            if b['height'] == 0:\n                b['hashPrev'] = store.chains_by.id[chain_id].genesis_hash_prev\n            else:\n                b['hashPrev'] = 'dummy' # Fool adopt_orphans.\n            store.offer_block_to_chains(b, [chain_id, ])\n        return True\n\n\n    def init_binfuncs(store):\n        store.binin = util.identity\n        store.binin_hex = util.from_hex\n        store.binin_int = util.binin_int\n        store.binout = util.identity\n        store.binout_hex = util.to_hex\n        store.binout_int = util.binout_int\n        store.intin = util.identity\n        store.hashin = util.rev\n        store.hashin_hex = util.from_hex\n        store.hashout = util.rev\n        store.hashout_hex = util.to_hex\n    \n    \n    def get_block_id_at_height(store, height, descendant_id):\n        if height is None:\n            return None\n        while True:\n            block = store.load_block(descendant_id)\n            if block['height'] == height:\n                return descendant_id\n            descendant_id = block[\n                'search_id'\n                if util.get_search_height(block['height']) >= height else\n                'prev_id']\n\n    def cache_block(store, block_id, height, prev_id, search_id):\n        assert isinstance(block_id, int), block_id\n        assert isinstance(height, int), height\n        assert prev_id is None or isinstance(prev_id, int)\n        assert search_id is None or isinstance(search_id, int)\n        block = {\n            'height': height,\n            'prev_id': prev_id,\n            'search_id': search_id}\n        store._blocks[block_id] = block\n        return block\n\n    def load_block(store, block_id):\n        block = store._blocks.get(block_id)\n        if block is None:\n            \n            row = store.db.get_block_id_by_height(block_id)\n            if row is None:\n                return None\n            height, prev_id, search_id = row\n            block = store.cache_block(\n                block_id, int(height),\n                None if prev_id is None else int(prev_id),\n                None if search_id is None else int(search_id))\n        return block\n    \n    \n    def new_id(store, key):\n        return store.db.new_id(key)\n    \n    \n    def flush(store):\n        if store.bytes_since_commit > 0:\n            store.db.commit()\n            LOG.debug(\"commit\")\n            store.bytes_since_commit = 0\n\n    def rollback(store):\n        store.db.rollback()\n    \n    def commit(store):\n        store.db.commit()\n    \n    def get_block_number(store):\n        return store.db.get_block_height()\n    ", "sub_path": "bitloader/loader/Datastore.py", 
"file_name": "Datastore.py", "file_ext": "py", "file_size_in_byte": 22792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "Chain.PUBKEY_HASH_LENGTH", "line_number": 22, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 30, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 30, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Chain.create", "line_number": 57, "usage_type": "call"}, {"api_name": "util.calculate_work", "line_number": 126, "usage_type": "call"}, {"api_name": "util.get_search_height", "line_number": 146, "usage_type": "call"}, {"api_name": "exception.DBException", "line_number": 172, "usage_type": "name"}, {"api_name": "Chain.SCRIPT_TYPE_ADDRESS", "line_number": 306, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_P2SH", "line_number": 306, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_PUBKEY", "line_number": 309, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_MULTISIG", "line_number": 312, "usage_type": "attribute"}, {"api_name": "Chain.SCRIPT_TYPE_BURN", "line_number": 324, "usage_type": "attribute"}, {"api_name": "util.calculate_work", "line_number": 393, "usage_type": "call"}, {"api_name": "util.get_search_height", "line_number": 430, "usage_type": "call"}, {"api_name": "util.identity", "line_number": 554, "usage_type": "attribute"}, {"api_name": "util.from_hex", "line_number": 555, "usage_type": "attribute"}, {"api_name": "util.binin_int", "line_number": 556, "usage_type": "attribute"}, {"api_name": "util.identity", "line_number": 557, "usage_type": "attribute"}, {"api_name": "util.to_hex", "line_number": 558, "usage_type": "attribute"}, {"api_name": "util.binout_int", "line_number": 559, "usage_type": "attribute"}, {"api_name": "util.identity", "line_number": 560, "usage_type": "attribute"}, {"api_name": "util.rev", "line_number": 561, "usage_type": "attribute"}, {"api_name": "util.from_hex", "line_number": 562, "usage_type": "attribute"}, {"api_name": "util.rev", "line_number": 563, "usage_type": "attribute"}, {"api_name": "util.to_hex", "line_number": 564, "usage_type": "attribute"}, {"api_name": "util.get_search_height", "line_number": 576, "usage_type": "call"}]}
+{"seq_id": "48195774", "text": "from random import *\nimport pygame\nfrom pygame.locals import *\nimport time\nimport sys\n\npygame.init()\n\npygame.display.set_caption('OSD2 Tetrix')\n\nrows, cols = 20, 10\narea = [[0 for col in range(cols)] for row in range(rows)]\nscreen = pygame.display.set_mode((cols*30 ,rows*30 + 10),0,32) # +250\nbackground = pygame.Surface(screen.get_size())\nbackground = background.convert()\nbackground.fill((10, 10, 10))\nspeed = 0.5\n\n# 블럭들\ntetrominoes = [0, 0, 0, 0, 0, 0, 0]\n# I : 막대, cyan 컬러\ntetrominoes[0]=[[\n [1,1,1,1]],\n\n [\n [1],\n [1],\n [1],\n [1]]]\n#colors[0]=0x00FFFF\n# T : ㅗ, purple 컬러\ntetrominoes[1]=[[\n [1,1,1],\n [0,1,0]],\n\n [\n [0,1],\n [1,1],\n [0,1]],\n\n [\n [0,1,0],\n [1,1,1]],\n\n [\n [1,0],\n [1,1],\n [1,0]]]\n#colors[1]=0x767676\n# L : ㄱ회전, orange 컬러\ntetrominoes[2]=[[\n [1,1,1],\n [1,0,0]],\n\n [\n [1,1],\n [0,1],\n [0,1]],\n\n [\n [0,0,1],\n [1,1,1]],\n\n [\n [1,0],\n [1,0],\n [1,1]]]\n#colors[2]=0xFFA500\n# J : ㄴ, blue 컬러\ntetrominoes[3]=[[\n [1,0,0],\n [1,1,1]],\n\n [\n [1,1],\n [1,0],\n [1,0]],\n\n [\n [1,1,1],\n [0,0,1]],\n\n [\n [0,1],\n [0,1],\n [1,1]]]\n#colors[3]=0x0000FF\n# Z : z, red 컬러\ntetrominoes[4]=[[\n [1,1,0],\n [0,1,1]],\n\n [\n [0,1],\n [1,1],\n [1,0]]]\n#colors[4]=0xFF0000\n# S : 벼락, green 컬러\ntetrominoes[5]=[[\n [0,1,1],\n [1,1,0]],\n\n [\n [1,0],\n [1,1],\n [0,1]]]\n#colors[5]=0x00FF00\n# O : 네모, yellow 컬러\ntetrominoes[6]=[[\n [1,1],\n [1,1]]]\n#colors[6]=0xFFFF00\n\ndef RawEnd(blocknum, blockstate) :\n return len(tetrominoes[blocknum][blockstate])\n\ndef ColEnd(blocknum, blockstate) : \n end = 0\n for row in range(len(tetrominoes[blocknum][blockstate])) :\n for col in range(4) : \n if tetrominoes[blocknum][blockstate] == 1 :\n if end < col : \n end = col\n return end\n\ndef DrawBlock() :\n screen.lock()\n for col in range(cols) :\n for row in range(rows) :\n if area[row][col] >= 1 :\n pygame.draw.rect(screen, (255,220,143), Rect((col*30,row*30), (27, 27)))\n pygame.display.update()\n screen.unlock()\n\ndef CleanUp() :\n screen.lock()\n screen.fill((10,10,10))\n pygame.display.update()\n screen.unlock()\n\ndef InsertAreaBlock(num) :\n tet = tetrominoes[num][0]\n tetrow = len(tetrominoes[num][0])\n tetcol = len(tetrominoes[num][0][0])\n row = 0\n\n while (tetrow > 0) :\n for col in range(tetcol) : \n area[0 + row][3 + col] = area[0 + row][3 + col] + tet[row][col]\n tetrow = tetrow - 1\n row = row + 1\n\ndef DownBlock(blocklocation, blocknum, blockstate) :\n tet = tetrominoes[blocknum][blockstate]\n tetcol = len(tetrominoes[blocknum][blockstate][0])\n tetlen = len(tet)\n row = 0\n x = blocklocation[0]\n y = blocklocation[1]\n\n if (x + tetlen == 20) :\n return False\n\n for col in range(tetcol) : \n if (x + tetlen < 20 and tet[tetlen - 1][col] > 0) :\n if (area[x + tetlen][y + col] > 0) :\n return False\n\n while (tetlen > 0) :\n for col in range(tetcol) : \n area[x + row][y + col] = area[x + row][y + col] - tet[row][col]\n tetlen = tetlen - 1\n row = row + 1\n\n tetlen = len(tet)\n row = 0\n while (tetlen > 0) :\n for col in range(tetcol) : \n area[x + 1 + row][y + col] = area[x + 1 + row][y + col] + tet[row][col]\n tetlen = tetlen - 1\n row = row + 1\n\n return True\n\ndef CheckHorizon(blocknum, blocklocation) :\n for col in range(10) :\n for row in range(4) :\n if (area[row][col] > 1) :\n return False\n\n return True\n\ndef Rotation(blocklocation, blocknum, blockstate) :\n rotatelen = len(tetrominoes[blocknum])\n tetcol = len(tetrominoes[blocknum][blockstate][0])\n x = blocklocation[0]\n y = 
blocklocation[1]\n\n    blockstate2 = blockstate\n    if (blockstate2 + 1 == rotatelen) :\n        blockstate2 = 0\n    else :\n        blockstate2 += 1\n\n    tet = tetrominoes[blocknum][blockstate]\n    tetlen = len(tet)\n\n    for row in range(tetlen) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] - tet[row][col]\n\n    tet = tetrominoes[blocknum][blockstate2]\n    tetcol = len(tetrominoes[blocknum][blockstate2][0])\n    tetlen = len(tet)\n\n    for row in range(tetlen) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] + tet[row][col]\n\n    return blockstate2\n\ndef Move(blocklocation, blocknum, blockstate, way) :\n    rotatelen = len(tetrominoes[blocknum])\n    tetcol = len(tetrominoes[blocknum][blockstate][0])\n    x = blocklocation[0]\n    y = blocklocation[1]\n\n    tet = tetrominoes[blocknum][blockstate]\n    tetlen = len(tet)\n    row = 0\n\n    while (tetlen > 0) :\n        for col in range(tetcol) : \n            area[x + row][y + col] = area[x + row][y + col] - tet[row][col]\n        tetlen = tetlen - 1\n        row = row + 1\n\n    tet = tetrominoes[blocknum][blockstate]\n    tetlen = len(tet)\n    row = 0\n\n    while (tetlen > 0) :\n        for col in range(tetcol) : \n            area[x + row][y + col + way] = area[x + row][y + col + way] + tet[row][col]\n        tetlen = tetlen - 1\n        row = row + 1\n\ndef Lineall() :\n    check = 0\n    row2 = 0\n\n    for row in range(20) :\n        for col in range(10) :\n            row2 = 19 - row\n            if (area[row2][col] == 1) :\n                check += 1\n            else :\n                break\n        if check == 10 :\n            return row2\n        else :\n            check = 0\n\n    return 0\n\ndef DownAll(lineall) :\n    area2 = [row[:] for row in area] # copy the rows so the shift below reads unmodified data\n    row2 = 0\n\n    for row in range(rows) :\n        print(area[row])\n    #print(\"area\")\n\n    for row in range(rows) :\n        print(area2[row])\n    #print(\"area2\")\n\n    for col in range(10) : \n        area[lineall][col] = 0\n\n    for row in range(lineall + 1) : \n        for col in range(10) : \n            row2 = 19 - row\n            if row2 == 0 :\n                break\n            area[row2][col] = area2[row2 - 1][col]\n\n    for row in range(rows) :\n        print(area[row])\n    #print(\"downall\")\n\n\ndef Run() : \n    gameover = False\n    noncollision = False\n    while not gameover :\n        for event in pygame.event.get() :\n            speed_up = 1\n            spacecheck = 0\n\n            if (noncollision == False) :\n                while Lineall() != 0 : \n                    lineall = Lineall()\n                    DownAll(lineall)\n\n                blocknum = randint(0, 6)\n                noncollision = True\n                InsertAreaBlock(blocknum)\n                blocklocation = [0, 3]\n                blockstate = 0\n\n                if not CheckHorizon(blocknum, blocklocation) :\n                    noncollision = False\n                    gameover = True\n                    break\n\n                CleanUp()\n                DrawBlock()\n\n            if event.type == pygame.QUIT :\n                pygame.quit()\n                sys.exit()\n            if event.type == pygame.KEYDOWN :\n                if event.key == K_UP :\n                    blockstate = Rotation(blocklocation, blocknum, blockstate)\n                    CleanUp()\n                    DrawBlock()\n                elif event.key == K_RIGHT :\n                    if (blocklocation[1] != 10 - ColEnd(blocknum, blockstate)) : \n                        temp = blockstate\n                        blockstate = Move(blocklocation, blocknum, blockstate, 1)\n                        blocklocation[1] += 1\n                        blockstate = temp\n                        CleanUp()\n                        DrawBlock()\n                elif event.key == K_LEFT :\n                    if blocklocation[1] > 0 :\n                        temp = blockstate\n                        blockstate = Move(blocklocation, blocknum, blockstate, -1)\n                        blocklocation[1] -= 1\n                        blockstate = temp\n                        CleanUp()\n                        DrawBlock()\n                elif event.key == K_DOWN :\n                    speed_up = 10\n                elif event.key == K_SPACE : \n                    downboolean2 = DownBlock(blocklocation, blocknum, blockstate)\n                    blocklocation[0] += 1\n                    while (downboolean2) :\n                        downboolean2 = DownBlock(blocklocation, blocknum, blockstate)\n                        blocklocation[0] += 1\n                        if (blocklocation[0] == 20 - RawEnd(blocknum, blockstate)) : \n                            break\n                    spacecheck = 1\n                    CleanUp()\n                    DrawBlock()\n\n            if spacecheck == 0 :\n                
downboolean = DownBlock(blocklocation, blocknum, blockstate)\n if downboolean :\n blocklocation[0] += 1\n elif not downboolean :\n noncollision = False\n\n time.sleep(float(0.1)/speed/speed_up * 3)\n\n #for row in range(rows) :\n # print(area[row])\n #print(\"Cut\")\n\n #if not hasattr(event, 'key') : \n # continue\n\nRun()\n\npygame.quit()", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 9779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 297, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 320, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 321, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 322, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 323, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 365, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 376, "usage_type": "call"}]}
+{"seq_id": "171747183", "text": "import sys\nimport base64\n\n\ndef encrypt(msg, key):\n '''\n Encrypt message using one time pad key\n input: msg - message to be encrypted\n key - one time pad key\n output: password\n '''\n while len(key) < len(msg):\n # Increase the length of the key\n diff = len(msg) - len(key)\n key += key[:diff]\n\n # Get the ascii representations of the message and the key\n amsg = list(map(lambda x: ord(x), list(msg)))\n akey = list(map(lambda x: ord(x), list(key[:len(msg)])))\n # XOR the message and the key\n xor = list(map(lambda x, y: x ^ y, amsg, akey))\n # Transform ascii encrypted message into string\n pwd = ''.join(list(map(lambda x: chr(x), xor)))\n # Encode password inn base64\n pwd = base64.b64encode(pwd.encode())\n\n return pwd.decode()\n\notp = \"Never send a human to do a machine's job\"\n\nprint(encrypt(sys.argv[1], otp))\n", "sub_path": "Semester 2/Information Security and Privacy/Homework/Hw2/get_pass.py", "file_name": "get_pass.py", "file_ext": "py", "file_size_in_byte": 888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "base64.b64encode", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "65698623", "text": "from flask import Flask, render_template, Response\r\nimport argparse\r\nfrom flask import url_for\r\nfrom flask import request\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-d\",\"--data\", required = True,\r\n help = \"Data to send\")\r\nargs = vars(ap.parse_args())\r\ndata=args['data']\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n return data\r\n\r\n\r\n\t\r\n\r\ndef shutdown_server():\r\n func = request.environ.get('werkzeug.server.shutdown')\r\n if func is None:\r\n raise RuntimeError('Not running with the Werkzeug Server')\r\n func()\r\n\r\n@app.route('/shutdown', methods=['GET','POST'])\r\ndef shutdown():\r\n shutdown_server()\r\n return 'Server shutting down...'\r\n\t\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', threaded=True)\r\n", "sub_path": "face_recogition/flask_for_cart.py", "file_name": "flask_for_cart.py", "file_ext": "py", "file_size_in_byte": 759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "303130924", "text": "__author__ = 'HXiao'\nfrom rpy2 import robjects as ro\n\n\n\nimport callingR as cr\n\nro.r(\"source('~/Documents/workspace/workSpace/Rscrpits/fundRisk.R')\")\n#ro.r(\"source('~/RScripts/RRiskManagement.R')\")\n\ninnercode = [102000298,102001873]\n#innerType = 1\n\n#cr.RiskManagement.innittt()\nes = cr.RiskManagement.es(innercode)\nprint(es)\n\n#ro.r(\"q()\")\n#sadf n\n", "sub_path": "testR.py", "file_name": "testR.py", "file_ext": "py", "file_size_in_byte": 346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rpy2.robjects.r", "line_number": 8, "usage_type": "call"}, {"api_name": "rpy2.robjects", "line_number": 8, "usage_type": "name"}, {"api_name": "callingR.RiskManagement.es", "line_number": 15, "usage_type": "call"}, {"api_name": "callingR.RiskManagement", "line_number": 15, "usage_type": "attribute"}]}
+{"seq_id": "71677587", "text": "#!/usr/bin/env python\nfrom __future__ import division\nimport sys\nimport os\nimport json\nimport re\nimport copy\nfrom yaml import load, dump\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nclass Parser():\n TYPE_DIMENSION = 'dimension'\n TYPE_DIMENSION_GROUP = 'dimension_group'\n TYPE_MEASURE = 'measure'\n TYPE_FILTER = 'filter'\n\n DEFAULT_TIMEFRAMES = ['date', 'day_of_month', 'day_of_week', 'day_of_week_index', 'day_of_year', 'hour', 'hour_of_day', 'minute', 'month', 'month_name', 'month_num', 'quarter', 'quarter_of_year', 'time', 'time_of_day', 'week', 'week_of_year', 'year' ]\n\n @staticmethod\n def isRawReference(field):\n return re.match('^ *\\$\\{TABLE\\}.[a-z_0-9]+ *$', field, re.MULTILINE)\n\n def __init__(self, stream):\n self.writeStream = stream\n\n @staticmethod\n def isDimensionGroup(field):\n if Parser.TYPE_DIMENSION_GROUP in field:\n return True\n if Parser.TYPE_DIMENSION in field and ('type' in field and field['type'] == 'time'):\n return True\n return False\n\n def fire(self,field,type,field_orig=None):\n if type in [Parser.TYPE_DIMENSION,Parser.TYPE_DIMENSION_GROUP] and 'sql' in field:\n # if raw reference, then remove a bunch of stuff\n if Parser.isRawReference(field['sql']):\n # remove absolute reference\n field_orig.pop('type', None)\n field_orig.pop('sql', None)\n\n if type in [Parser.TYPE_MEASURE] and 'sql' in field:\n if re.search('[ ]*(\\$\\{TABLE\\}.[a-z_0-9]+)[ ]*', field['sql']):\n field_orig['sql'] = re.sub('\\$\\{TABLE\\}.([a-z_0-9]+)',r'${\\1}',field['sql'])\n\n def extractGroup(self,field):\n set = []\n name = field['dimension_group'] if Parser.TYPE_DIMENSION_GROUP in field else field['dimension']\n if 'timeframes' in field:\n for dim in field['timeframes']:\n set.append(name+'_'+dim)\n else:\n # default timeframes\n for dim in Parser.DEFAULT_TIMEFRAMES:\n set.append(name+'_'+dim)\n return set\n\n def dump(self,tree):\n dump(tree, self.writeStream, default_flow_style=False)\n\n\n#\n # Parser that handles sets\n #\nclass baseViewParser(Parser):\n\n def __init__(self, stream):\n self.writeStream = stream\n self.fields = []\n\n def fire(self,field,type,field_orig=None):\n if type in ['dimension','dimension_group'] and 'sql' in field:\n name = field[type]\n # re.search('[ ]*(\\$\\{TABLE\\}.[a-z_]+)[ ]*', field['sql']):\n if field['sql'].startswith('${TABLE}.') and Parser.isRawReference(field['sql']):\n new_field = {}\n new_field[type] = name # e.g. 
new_field['dimension'] = name\n if 'type' in field:\n new_field['type'] = field['type']\n if 'sql' in field:\n new_field['sql'] = field['sql']\n else:\n new_field['sql'] = '${TABLE}.'+name\n self.fields.append(new_field)\n\n def dump(self,tree):\n new_tree = []\n for directive in tree:\n new_tree.append({\n 'view': {}\n , 'fields': self.fields\n }) # tree is blank at the beginning\n\n dump(new_tree, self.writeStream, default_flow_style=False)\n\n#\n # Parser that handles sets\n #\nclass setParser(Parser):\n sets = {\n 'Clickstream': 1\n , 'Sponsorships': 1\n , 'User Activity': 1\n , 'Consumer': 1\n , 'Email': 1\n }\n\n DIMENSION_ALL_TYPE = 1\n DIMENSION_GROUP_ALL_TYPE = 2\n\n def __init__(self, stream):\n self.writeStream = stream\n\n self.hiddenFields = {\n 'dimension': []\n , 'dimension_group': []\n }\n\n self.resulting_set = {\n 'dimensions': {}\n , 'measures': {}\n , 'all': {\n 'dimension': {}\n , 'dimension_group': {}\n , 'filter': {}\n , 'measure': {}\n }\n }\n\n for key in setParser.sets.keys():\n self.resulting_set['dimensions'][key] = {}\n self.resulting_set['measures'][key] = {}\n\n def fire(self,field,type,field_orig=None):\n name = field[type]\n if type in [Parser.TYPE_DIMENSION,Parser.TYPE_DIMENSION_GROUP,Parser.TYPE_MEASURE]: #,Parser.TYPE_FILTER]:\n if Parser.isDimensionGroup(field):\n dimensions = self.extractGroup(field)\n for dim in dimensions:\n self.resulting_set['all'][Parser.TYPE_DIMENSION][dim] = 2\n else:\n self.resulting_set['all'][type][name] = 1\n\n # hide dimensions and measures\n if type in [Parser.TYPE_DIMENSION]:\n self.hiddenFields['dimension'].append(name)\n if type in [Parser.TYPE_DIMENSION_GROUP]:\n self.hiddenFields['dimension_group'].append(name)\n\n matching_category = self.isFilterMatch(field) or self.isLabelMatch(field)\n if matching_category:\n if Parser.isDimensionGroup(field):\n dimensions = self.extractGroup(field)\n for dim in dimensions:\n self.resulting_set['dimensions'][matching_category][dim] = 2 # since it is part of a group\n elif type == Parser.TYPE_DIMENSION:\n self.resulting_set['dimensions'][matching_category][field[type]] = 1\n elif type == Parser.TYPE_MEASURE:\n self.resulting_set['measures'][matching_category][field[type]] = 1\n # # now add all the filter dependencies\n if 'filters' in field:\n for filter_field in field['filters'].keys():\n self.resulting_set['measures'][matching_category][filter_field] = 1\n\n\n\n #\n # returns set name if match, otherwise None\n #\n def isFilterMatch(self,field):\n #TODO\n return None\n\n #\n # returns set name if match, otherwise None\n #\n def isLabelMatch(self,field):\n if 'view_label' in field:\n for category in self.sets.keys():\n if category in field['view_label']:\n return category\n return None\n\n def dump(self,tree):\n self.writeStream.write('\\tsets:\\n')\n # for category in self.sets.keys():\n # lookmlcatname = category.lower().replace(\" \", \"\")\n # self.writeStream.write('\\t\\tdim_'+lookmlcatname+':\\n')\n # for field in self.resulting_set['dimensions'][category].keys():\n # self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n # self.writeStream.write('\\t\\tmes_'+lookmlcatname+':\\n')\n # for field in self.resulting_set['measures'][category].keys():\n # self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n # self.writeStream.write('\\t\\t'+lookmlcatname+':\\n')\n # self.writeStream.write('\\t\\t\\t- dim_'+lookmlcatname+'*\\n')\n # self.writeStream.write('\\t\\t\\t- mes_'+lookmlcatname+'*\\n')\n\n # self.writeStream.write('\\t\\tall_dim:\\n')\n # for field in 
self.resulting_set['all']['dimension'].keys():\n # self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n self.writeStream.write('\\t\\tall_measures:\\n')\n for field in self.resulting_set['all']['measure'].keys():\n self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n # for field in self.resulting_set['all']['dimension_group']:\n # self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n for field in self.resulting_set['all']['filter'].keys():\n self.writeStream.write('\\t\\t\\t- '+field+'\\n')\n\n self.writeStream.write('\\tfields:\\n')\n\n wrote_groups = {}\n\n for field in self.hiddenFields['dimension']:\n self.writeStream.write('\\t- dimension: '+field+'\\n\\t\\thidden: true\\n')\n for field in self.hiddenFields['dimension_group']:\n self.writeStream.write('\\t- dimension_group: '+field+'\\n\\t\\thidden: true\\n')\n\n\n\ndef processElement(listeners,element):\n if isinstance(element, list):\n for el in element:\n processElement(listeners,el)\n elif isinstance(element, dict):\n element_copy = copy.deepcopy(element)\n for key in element.keys():\n if key in ['dimension','dimension_group','filter','measure']:\n for listener in listeners:\n listener.fire(element_copy,key,element)\n break;\n else:\n processElement(listeners,element[key])\n\n\ndef processView(stream,listeners):\n file_tree = load(stream, Loader=Loader)\n processElement(listeners,file_tree)\n for listener in listeners:\n listener.dump(file_tree)\n stream.close()\n\n#\n # Outputs 3 files:\n # - raw data model (which has only {TABLE}. references, types)\n # - a new copy of the original LookML file with all {TABLE} references replaced with {} names\n # - a set file based on filter matches\n #/\ndef main():\n INPUT_FILE = ''\n OUTPUT_DIRECTORY = ''\n if len(sys.argv) >= 3:\n INPUT_FILE = sys.argv[1]\n OUTPUT_DIRECTORY = sys.argv[2]\n else:\n print(\"./set_builder.py INPUT_LOOKML_FILE OUTPUT_DIRECTORY\")\n sys.exit()\n\n if OUTPUT_DIRECTORY.find('/',len(OUTPUT_DIRECTORY)-1) == -1:\n OUTPUT_DIRECTORY = OUTPUT_DIRECTORY+'/'\n\n file_path = os.path.realpath(INPUT_FILE)\n stream = file(file_path, 'r')\n\n raw_schema_file = os.path.join(OUTPUT_DIRECTORY,'raw_schema.lookml')\n modified_file = os.path.join(OUTPUT_DIRECTORY,'mod_file.lookml')\n set_file = os.path.join(OUTPUT_DIRECTORY,'set_file.lookml')\n\n # start writing out the output for these files immediately\n listeners = []\n with open(raw_schema_file,'w') as raw_f:\n with open(modified_file,'w') as mod_f:\n with open(set_file,'w') as set_f:\n listeners.append(baseViewParser(raw_f))\n listeners.append(Parser(mod_f))\n listeners.append(setParser(set_f))\n processView(stream,listeners)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "set_builder.py", "file_name": "set_builder.py", "file_ext": "py", "file_size_in_byte": 9204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.match", "line_number": 25, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 48, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 63, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 98, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 229, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 240, "usage_type": "call"}, {"api_name": "yaml.Loader", "line_number": 240, "usage_type": "name"}, {"api_name": "sys.argv", 
"line_number": 255, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 256, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 257, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}]}
+{"seq_id": "596614256", "text": "\"\"\"\n.. module MultiCropAndOpenFace\n :synopsis: Script to apply cropping and OpenFace to all videos in a directory.\n\n\"\"\"\n\nimport glob\nimport json\nimport os\nimport subprocess\nimport sys\nimport numpy as np\n\nsys.path.append('/home/gvelchuru/')\nfrom OpenFaceScripts.runners import CropAndOpenFace\n\n\ndef make_vids(path):\n \"\"\"\n Return list of vids not processed yet given a path\n :param path: Path to video directory\n :type path: str\n :return: list of vids to do\n \"\"\"\n folder_components = set(os.path.join(path, x) for x in os.listdir(path))\n return [x for x in glob.glob(os.path.join(path, '*.avi')) if (\n os.path.splitext(x)[0] + '_cropped' not in folder_components or 'au.txt' not in os.listdir(\n os.path.join(path, os.path.splitext(x)[0] + '_cropped')))]\n\n\ndef make_crop_and_nose_files(path):\n crop_file = os.path.join(path, 'crop_files_list.txt')\n nose_file = os.path.join(path, 'nose_files_list.txt')\n\n if not os.path.exists(crop_file):\n crop_path = sys.argv[sys.argv.index('-c') + 1]\n crop_txt_files = CropAndOpenFace.find_txt_files(crop_path)\n json.dump(crop_txt_files, open(crop_file, mode='w'))\n\n if not os.path.exists(nose_file):\n nose_path = sys.argv[sys.argv.index('-n') + 1]\n nose_txt_files = CropAndOpenFace.find_txt_files(nose_path)\n json.dump(nose_txt_files, open(nose_file, mode='w'))\n\n return json.load(open(crop_file)), json.load(open(nose_file))\n\n\nif __name__ == '__main__':\n\n path = sys.argv[sys.argv.index('-id') + 1]\n\n vids = make_vids(path)\n num_GPUs = 2\n processes = []\n indices = np.linspace(0, len(vids), num=num_GPUs + 1)\n for index in range(len(indices) - 1):\n processes.append(subprocess.Popen(\n ['python3', '/home/gvelchuru/OpenFaceScripts/helpers/HalfCropper.py', '-id', path, '-vl',\n str(int(indices[index])), '-vr',\n str(int(indices[index + 1]))],\n env={'CUDA_VISIBLE_DEVICES': '{0}'.format(str(index))}))\n [p.wait() for p in processes]\n", "sub_path": "runners/MultiCropAndOpenFace.py", "file_name": "MultiCropAndOpenFace.py", "file_ext": "py", "file_size_in_byte": 2041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 36, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace.find_txt_files", "line_number": 37, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace", "line_number": 37, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 41, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace.find_txt_files", "line_number": 42, "usage_type": "call"}, {"api_name": "OpenFaceScripts.runners.CropAndOpenFace", "line_number": 42, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 43, "usage_type": "call"}, {"api_name": "json.load", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 55, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "430582480", "text": "from django import forms\nfrom crispy_forms.bootstrap import TabHolder, Tab\nfrom common_data.forms import BootstrapMixin\nfrom django.contrib.auth import authenticate\nfrom crispy_forms.helper import FormHelper\n\nfrom crispy_forms.layout import (Row, \n Column, \n Fieldset,\n Submit, \n Div,\n Layout,\n HTML)\nfrom . import models\nfrom employees.models import Employee\nfrom django_select2.forms import Select2Widget\n\nclass ServiceForm(forms.ModelForm,BootstrapMixin):\n category = forms.ModelChoiceField(models.ServiceCategory.objects.all(), required=False)\n\n class Meta:\n fields = \"__all__\"\n model = models.Service\n\n widgets = {\n 'description':forms.Textarea(attrs={'rows':4, 'cols':15}),\n 'procedure': Select2Widget(attrs={'data-width': '20rem'})\n } \n \n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n 'name',\n 'description',\n Row(\n Column('flat_fee', css_class='form-group col-6'),\n Column('hourly_rate', css_class='form-group col-6'),\n ),\n Row(\n Column('category', css_class='form-group col-4'),\n Column('procedure', css_class='form-group col-4'),\n Column('frequency', css_class='form-group col-4'),\n ),\n 'is_listed',\n Div(Submit('submit', 'Submit'), css_class=\"floating-submit\")\n )\nclass ServiceCategoryForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n fields = \"__all__\"\n model = models.ServiceCategory\n\n\nclass ServicePersonForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n fields = \"__all__\"\n model = models.ServicePerson\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit('submit', 'Submit'))\nclass ServicePersonUpdateForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n exclude = \"employee\",\n model = models.ServicePerson\n\n\nclass ServiceTeamForm(forms.ModelForm, BootstrapMixin):\n #create members in react\n class Meta:\n exclude = \"members\",\n model = models.ServiceTeam\n widgets = {\n \"description\": forms.Textarea(attrs={\"rows\": 4})\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column(\n 'Team Creation Form',\n 'name',\n 'description',\n 'manager',\n css_class=\"col-6\"),\n Column(\n HTML(\n \"\"\"\n Select Service People:
\n \n \"\"\"\n ), css_class=\"col-6\")\n )\n )\n self.helper.add_input(Submit('submit', 'Submit')) \n\nclass ServiceWorkOrderForm(forms.ModelForm, BootstrapMixin):\n #create service people in react\n status = forms.CharField(widget=forms.HiddenInput)\n works_request = forms.ModelChoiceField(\n models.WorkOrderRequest.objects.all(),\n widget=forms.HiddenInput)\n class Meta:\n fields = ['date', 'time', 'expected_duration', 'team', 'status', 'description', 'works_request' ]\n model = models.ServiceWorkOrder\n widgets = {\n 'description': forms.Textarea(attrs={'rows': 4})\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n TabHolder(\n Tab('Form',\n Row(\n Column('date', css_class=\"form group col-6\"),\n Column('time', css_class=\"form group col-6\"),\n ),\n 'works_request',\n 'description',\n 'completed',\n 'expected_duration',\n 'status',\n 'authorized_by',\n 'team',\n 'progress',\n ),\n Tab('Service People',\n HTML(\n \"\"\"\n
\n \"\"\"\n ),\n ),\n )\n )\n self.helper.add_input(Submit('submit', 'Submit'))\n \nclass ServiceWorkOrderCompleteForm(forms.ModelForm, BootstrapMixin):\n progress = forms.CharField(widget=forms.HiddenInput, required=False)\n service_time = forms.CharField(widget=forms.HiddenInput, required=False)\n class Meta:\n fields = [\"progress\"]\n model = models.ServiceWorkOrder\n \n\nclass ServiceWorkOrderAuthorizationForm(BootstrapMixin, forms.Form):\n '''Authorization handled in the functional view work_order_authorize'''\n \n authorized_by = forms.ModelChoiceField(Employee.objects.filter(serviceperson__isnull=False))\n password = forms.CharField(widget=forms.PasswordInput)\n status = forms.ChoiceField(choices=models.ServiceWorkOrder.STATUS_CHOICES)\n\n def clean(self, *args, **kwargs):\n cleaned_data = super().clean(*args, **kwargs)\n password = cleaned_data['password']\n employee = cleaned_data['authorized_by']\n\n if not authenticate(username=employee.user.username, password=password):\n raise forms.ValidationError('The password supplied is incorrect.')\n\n return cleaned_data\n\nclass EquipmentRequisitionForm(forms.ModelForm, BootstrapMixin):\n equipment = forms.CharField(widget=forms.HiddenInput)\n class Meta:\n exclude = \"authorized_by\", \"released_by\", 'received_by', 'returned_date'\n model = models.EquipmentRequisition\n widgets = {\n 'work_order': Select2Widget(attrs={'data-width': '20rem'})\n }\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('date','equipment' , css_class=\"col-sm-6\"), \n Column('work_order', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n \n )\n self.helper.add_input(Submit('submit', 'Submit'))\n \n\n\nclass WorkOrderEquipmentRequisitionForm(forms.ModelForm, BootstrapMixin):\n work_order = forms.ModelChoiceField(models.ServiceWorkOrder.objects.all(), \n widget=forms.HiddenInput)\n equipment = forms.CharField(widget=forms.HiddenInput)\n \n class Meta:\n exclude = \"authorized_by\", \"released_by\", 'received_by', 'returned_date'\n model = models.EquipmentRequisition\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('work_order', 'equipment', css_class=\"col-sm-12\"), css_class=\"form-row\"),\n Row(\n Column('date', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-12\"),\n css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n \n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\n \n\nclass ConsumablesRequisitionForm(forms.ModelForm, BootstrapMixin):\n consumables = forms.CharField(widget=forms.HiddenInput)\n \n class Meta:\n exclude = \"authorized_by\", \"released_by\",\n model = models.ConsumablesRequisition\n widgets = {\n 'work_order': Select2Widget(attrs={'data-width': '20rem'})\n }\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('date', 'consumables', css_class=\"col-sm-6\"), \n Column('work_order', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n \n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass WorkOrderConsumablesRequisitionForm(forms.ModelForm, BootstrapMixin):\n work_order = forms.ModelChoiceField(models.ServiceWorkOrder.objects.all(), \n widget=forms.HiddenInput)\n consumables = forms.CharField(widget=forms.HiddenInput)\n \n class Meta:\n exclude = \"authorized_by\", \"released_by\",\n model = models.ConsumablesRequisition\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(\n Column('work_order', 'consumables',css_class=\"col-sm-12\"), css_class=\"form-row\"),\n Row(\n Column('date', css_class=\"col-sm-6\"), \n Column('requested_by', css_class=\"col-sm-6\"), css_class=\"form-row\"),\n Row(\n Column('department', css_class=\"col-sm-6\"),\n Column('warehouse', css_class=\"col-sm-6\"), \n css_class=\"form-row\"),\n Row(\n Column('reference', css_class=\"col-sm-12\"),\n css_class=\"form-row\"),\n HTML(\"\"\"
\"\"\")\n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\nclass ServiceProcedureForm(forms.ModelForm, BootstrapMixin):\n class Meta:\n exclude = \"required_equipment\", \"required_consumables\"\n model = models.ServiceProcedure\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n TabHolder(\n Tab('Procedure Details',\n 'name',\n 'reference',\n 'author',\n 'description',\n ),\n Tab('procedure steps',\n HTML(\n \"\"\"\n
\n
\n \"\"\"\n )\n ),\n Tab('Select Equipment And Consumables',\n HTML(\n \"\"\"\n
\n \"\"\"\n )\n ),\n )\n )\n self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass EquipmentReturnForm(BootstrapMixin, forms.Form):\n received_by = forms.ModelChoiceField(Employee.objects.filter(inventorycontroller__isnull=False))\n password = forms.CharField(widget=forms.PasswordInput)\n return_date = forms.DateField()\n requisition = forms.ModelChoiceField(models.EquipmentRequisition.objects.all(), widget=forms.HiddenInput)\n\n def clean(self):\n cleaned_data = super().clean()\n usr = authenticate(\n username=cleaned_data['received_by'].user.username,\n password=cleaned_data['password'])\n\n if not usr:\n raise forms.ValidationError(\n 'The Inventory Controller password is incorrect.')\n \n requisition = cleaned_data['requisition']\n requisition.received_by = cleaned_data['received_by']\n requisition.returned_date = cleaned_data['return_date']\n requisition.save()\n return cleaned_data\n\nclass WorkOrderRequestForm(BootstrapMixin, forms.ModelForm):\n class Meta:\n fields = 'created', 'created_by','description', 'service', 'status'\n model = models.WorkOrderRequest\n widgets = {\n 'description': forms.Textarea(attrs={'rows': 4})\n }\n\n ", "sub_path": "services/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 13340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.forms.ModelForm", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django_select2.forms.Select2Widget", "line_number": 27, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 33, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 34, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 37, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 41, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Div", "line_number": 47, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 47, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 49, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 49, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 55, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 55, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 62, "usage_type": "call"}, 
{"api_name": "crispy_forms.layout.Submit", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 64, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 64, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 70, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 76, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 76, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 81, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 82, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 83, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 84, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 90, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 91, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 99, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 101, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 101, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 103, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 103, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 104, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 104, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 106, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 111, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 111, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 116, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 117, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.TabHolder", "line_number": 118, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 119, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 120, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 121, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 122, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 133, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 134, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 142, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 144, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 144, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 144, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 145, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 145, 
"usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 145, "usage_type": "attribute"}, {"api_name": "django.forms.CharField", "line_number": 146, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 146, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 146, "usage_type": "attribute"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 152, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 152, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 155, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 155, "usage_type": "name"}, {"api_name": "employees.models.Employee.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "employees.models.Employee.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "employees.models.Employee", "line_number": 155, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 156, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 156, "usage_type": "name"}, {"api_name": "django.forms.PasswordInput", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.forms.ChoiceField", "line_number": 157, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 157, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 164, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 165, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 165, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 169, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 169, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 169, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 170, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 170, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 170, "usage_type": "attribute"}, {"api_name": "django_select2.forms.Select2Widget", "line_number": 175, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 182, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 183, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 184, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 185, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 186, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 187, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 188, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 189, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 191, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 192, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 193, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 194, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 197, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 201, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 201, "usage_type": 
"name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 201, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 202, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 202, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 203, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 203, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 204, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 204, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 204, "usage_type": "attribute"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 213, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 214, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 215, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 216, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 217, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 218, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 219, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 220, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 221, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 222, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 224, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 225, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 227, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 230, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 234, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 234, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 234, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 235, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 235, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 235, "usage_type": "attribute"}, {"api_name": "django_select2.forms.Select2Widget", "line_number": 241, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 248, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 249, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 250, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 251, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 252, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 253, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 254, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 255, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 257, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 258, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 259, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 260, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 263, "usage_type": 
"call"}, {"api_name": "django.forms.ModelForm", "line_number": 266, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 266, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 266, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 267, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 267, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 268, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 268, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 269, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 269, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 269, "usage_type": "attribute"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 278, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 279, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 280, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 281, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 282, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 283, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 284, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 285, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 286, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 287, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Row", "line_number": 289, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Column", "line_number": 290, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 292, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 294, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 296, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 296, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 296, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 303, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 304, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.TabHolder", "line_number": 305, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 306, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 312, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 313, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.Tab", "line_number": 320, "usage_type": "call"}, {"api_name": "crispy_forms.layout.HTML", "line_number": 321, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 329, "usage_type": "call"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 332, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 332, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 332, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 333, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 333, "usage_type": "name"}, {"api_name": "employees.models.Employee.objects.filter", "line_number": 333, "usage_type": 
"call"}, {"api_name": "employees.models.Employee.objects", "line_number": 333, "usage_type": "attribute"}, {"api_name": "employees.models.Employee", "line_number": 333, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 334, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 334, "usage_type": "name"}, {"api_name": "django.forms.PasswordInput", "line_number": 334, "usage_type": "attribute"}, {"api_name": "django.forms.DateField", "line_number": 335, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 335, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 336, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 336, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 336, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 340, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 345, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 345, "usage_type": "name"}, {"api_name": "common_data.forms.BootstrapMixin", "line_number": 354, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 354, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 354, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 359, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 359, "usage_type": "name"}]}
+{"seq_id": "18779576", "text": "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom Utils.Tools import success, error,successResult,successResultList,errorResult\nfrom .models import Question\nfrom Worker.models import Worker\nfrom Respondent.models import RespondentMain,RespondentAnswer\nimport os\nimport json\nfrom Utils import Tools\n\n\n'''\n\n\n#查询未结束的问卷\ndef surveyNotEnd(request):\n if request.method == 'POST':\n m=SurveyService()\n lt=m.querySurveyAllList(searchkey='',end=False)\n relist=list()\n if lt != None and len(lt) > 0:\n for item in lt:\n sub=dict()\n sub['surveyid']=item.strSurveyId\n sub['title']=item.strTitle\n sub['max']=str(item.iMax)\n sub['createtime']=item.strCreateTime\n sub['endtime']=item.strEndTime\n sub['remark']=item.strRemark\n sub['filepath']=item.strFilePath\n relist.append(sub)\n if len(relist) > 0:\n re=successResultList(relist)\n return JsonResponse(re)\n else:\n re=errorResult('',-8)\n return JsonResponse(re)\n else:\n return JsonResponse(errorResult('错误的请求方式'))\n\n\n\n#下载问卷文件\ndef downFile(request):\n sid=request.GET.get('sid')\n if sid == None or sid=='':\n pass\n m=SurveyService()\n\n ob=m.getSurvey(sid)\n if ob == None or ob.strFilePath == None or ob.strFilePath == '':\n pass\n filepath_=ob.strFilePath\n\n\n def readFile(fn, buf_size=262144):#大文件下载,设定缓存大小\n f = open(fn, \"rb\")\n while True:#循环读取\n c = f.read(buf_size)\n if c:\n yield c\n else:\n break\n f.close()\n\n response = JsonResponse(readFile(filepath_), content_type='APPLICATION/OCTET-STREAM') #设定文件头,这种设定可以让任意文件都能正确下载,而且已知文本文件不是本地打开\n response['Content-Disposition'] = 'attachment; filename='+sid.encode('utf-8') + sid.encode('utf-8')#设定传输给客户端的文件名称\n response['Content-Length'] = os.path.getsize(filepath_)#传输给客户端的文件大小\n return response\n'''\n\n\n#终端登录\n@csrf_exempt\ndef mobileLogin(request):\n if request.method == 'POST':\n strJson=request.POST.get('strJson','')\n try:\n data=json.loads(strJson)\n except:\n return JsonResponse(errorResult('请求数据解析失败'))\n username=data.get('username','')\n pwd2=data.get('pwd','')\n ob=Worker.objects.filter(strSNO=username)\n if ob == None or len(ob) == 0:\n return JsonResponse(errorResult('用户名密码错误1'))\n else:\n bean=ob[0]\n if bean.strPwd==pwd2:\n return JsonResponse(successResult(bean.strSNO))\n else:\n return JsonResponse(errorResult('用户名密码错误2'))\n else:\n return JsonResponse(errorResult('错误的请求方式'))\n\n\n\n#查询问题\n@csrf_exempt\ndef queryQuestion(request):\n if request.method == 'POST':\n strJson=request.POST.get('strJson','')\n try:\n timestr=json.loads(strJson)\n except:\n return JsonResponse(errorResult('请求数据解析失败'))\n\n tmp=Question.objects.filter(strTime__gte=timestr.get('time',''))\n re=list()\n if tmp !=None and len(tmp)>0:\n for item in tmp:\n sub=dict()\n sub['qid']=item.strQId\n sub['qno']=item.strQNo\n sub['question']=item.strQuestion\n sub['img']=item.strImg\n sub['type']=item.iType\n sub['a1']=item.strA1\n sub['a2']=item.strA2\n sub['a3']=item.strA3\n sub['a4']=item.strA4\n sub['a5']=item.strA5\n\n re.append(sub)\n return JsonResponse(successResultList(re))\n else:\n return JsonResponse(errorResult('错误的请求方式'))\n\n\n\n#上传回访信息(单人信息和答案)\n@csrf_exempt\ndef upRespondent(request):\n if request.method == 'POST':\n strJson=request.POST.get('strJson','')\n try:\n data=json.loads(strJson)\n person=data.get('person')\n alist=data.get('list')\n except:\n return JsonResponse(errorResult('请求数据解析失败'))\n\n #受访人信息\n if person != None:\n bean=RespondentMain()\n 
bean.strRid=person.get('rid','')\n bean.strName=person.get('name','')\n bean.iAge=person.get('age',0)\n\n sex=person.get('age',0)\n if sex == 1 :\n bean.strSex='男'\n else:\n bean.strSex='女'\n bean.strPhone=person.get('phone','')\n bean.strBorn=person.get('born','')\n bean.strMZ=person.get('mz','')\n\n bean.strStudyLv=person.get('studylv','')\n bean.strJob=person.get('job','')\n bean.strArea=person.get('area','')\n bean.strAddress=person.get('address','')\n bean.strMoney=person.get('money','')\n bean.strKISH=person.get('kish','')\n bean.strReceptTime=person.get('uptime','')\n bean.strSick=person.get('sick','')\n bean.iSickYear=person.get('year',0)\n bean.iLocal=person.get('local',0)\n bean.strSNO=person.get('sno','')\n bean.strSName=person.get('sname','')\n bean.strHealth=person.get('health','')\n bean.save()\n\n\n #答案\n if alist != None and len(alist) > 0:\n dblist=list()\n for item in alist:\n sub=RespondentAnswer()\n sub.strId=Tools.CreateUUID()\n sub.strRid=item.get('rid','')\n sub.strQId=item.get('qid','')\n sub.strItem=item.get('itemid','')\n dblist.append(sub)\n RespondentAnswer.objects.bulk_create(dblist)\n return JsonResponse(successResult('操作成功'))\n else:\n return JsonResponse(errorResult('错误的请求方式'))\n\n", "sub_path": "Question/mobileview.py", "file_name": "mobileview.py", "file_ext": "py", "file_size_in_byte": 6267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "json.loads", "line_number": 80, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 82, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 82, "usage_type": "call"}, {"api_name": "Worker.models.Worker.objects.filter", "line_number": 85, "usage_type": "call"}, {"api_name": "Worker.models.Worker.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "Worker.models.Worker", "line_number": 85, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 87, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 87, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "Utils.Tools.successResult", "line_number": 91, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 93, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 93, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 95, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 95, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 75, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 105, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 107, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 107, "usage_type": "call"}, {"api_name": "models.Question.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 109, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 126, "usage_type": "call"}, {"api_name": "Utils.Tools.successResultList", "line_number": 126, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 128, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 128, "usage_type": "call"}, 
{"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 100, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 138, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 142, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 142, "usage_type": "call"}, {"api_name": "Respondent.models.RespondentMain", "line_number": 146, "usage_type": "call"}, {"api_name": "Respondent.models.RespondentAnswer", "line_number": 180, "usage_type": "call"}, {"api_name": "Utils.Tools.CreateUUID", "line_number": 181, "usage_type": "call"}, {"api_name": "Utils.Tools", "line_number": 181, "usage_type": "name"}, {"api_name": "Respondent.models.RespondentAnswer.objects.bulk_create", "line_number": 186, "usage_type": "call"}, {"api_name": "Respondent.models.RespondentAnswer.objects", "line_number": 186, "usage_type": "attribute"}, {"api_name": "Respondent.models.RespondentAnswer", "line_number": 186, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 187, "usage_type": "call"}, {"api_name": "Utils.Tools.successResult", "line_number": 187, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 189, "usage_type": "call"}, {"api_name": "Utils.Tools.errorResult", "line_number": 189, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 133, "usage_type": "name"}]}
+{"seq_id": "577308025", "text": "#! /usr/bin/python\n\nimport warnings\nimport wikipedia\nimport datetime\nfrom translate import Translator\n\nLEADING_MESSAGE = \"ducky: \"\n\ndef wikipedia_ref(phrase):\n return wikipedia.summary(phrase)\n\nwhile True:\n user_input = input(\"ducky>>>\").strip()\n if user_input == \"date\":\n print(\"{} Da date is: {}\".format(LEADING_MESSAGE, datetime.date.today()))\n \n elif \"wiki\" in user_input:\n try:\n replaced_user_input = user_input.replace(\"wiki\", \"\")\n print(\"{} Here is your article: {}\".format(LEADING_MESSAGE, wikipedia_ref(replaced_user_input))) \n \n except wikipedia.exceptions.DisambiguationError:\n print(\"{} That is not a wikipedia page, here are some suggestions: {}\".format(LEADING_MESSAGE, wikipedia.search(replaced_user_input, suggestion=True))) \n \n elif \"tra\" in user_input:\n replaced_user_input = user_input.replace(\"tra\", \"\")\n list_of_args = replaced_user_input.split()\n arg_1 = list_of_args[0]\n arg_2 = list_of_args[1]\n translator = Translator(to_lang=arg_2)\n translation = translator.translate(arg_1)\n print(\"{} Translation: {} to {}\".format(LEADING_MESSAGE, arg_1, translation))\n \n elif \"bye\" in user_input:\n break \n", "sub_path": "ducky/ducky.py", "file_name": "ducky.py", "file_ext": "py", "file_size_in_byte": 1283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "wikipedia.summary", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 16, "usage_type": "attribute"}, {"api_name": "wikipedia.exceptions", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wikipedia.search", "line_number": 24, "usage_type": "call"}, {"api_name": "translate.Translator", "line_number": 31, "usage_type": "call"}]}
+{"seq_id": "457395364", "text": "# coding: utf-8\n\nimport os\nfrom selenium import webdriver\nimport unittest\nimport time,sys\nimport re\nimport HTMLTestRunner\n\n\nclass Baidu(unittest.TestCase):\n\n def setUp(self):\n # 添加 driver\n self.path = r\"C:\\Users\\lenovo\\AppData\\Local\\Google\\Chrome\\Application\\chromedriver.exe\"\n self.driver = webdriver.Chrome(executable_path=self.path)\n self.url = \"http://map.baidu.com\"\n\n # 增加 截图 函数\n def add_img(self):\n self.imgs.append(self.driver.get_screenshot_as_base64())\n return True\n\n def test_map_search(self):\n self.driver.get(self.url)\n self.driver.find_element_by_id(\"sole-input\").send_keys(u\"淮海路思南路口\")\n self.driver.find_element_by_id(\"search-button\").click()\n time.sleep(2)\n self.add_img() # 截图 调用 之前定义的函数\n return \"map search is done!\"\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n suit = unittest.TestSuite()\n suit.addTest(Baidu(\"test_map_search\"))\n\n runner = unittest.TextTestRunner()\n runner.run(suit)\n", "sub_path": "selenium_python自动化实战(练习)/htmltestrunner/python3/[推荐]输出_截图版/示例/Testcase/baidu/Sub_map.py", "file_name": "Sub_map.py", "file_ext": "py", "file_size_in_byte": 1103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 37, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "276649895", "text": "from django.shortcuts import render, redirect\nfrom datawisata.models import Datawisata\n\ndef home(request):\n datawisatas = Datawisata.objects.all()\n \n konteks = {\n 'datawisatas' : datawisatas, \n }\n\n return render(request, 'halamanutama.html', konteks)\n\n\n# def tambah_wisata(request):\n# form = FormDataWisata\n# konteks = {\n# 'form' : form,\n# }\n# return render(request, 'tambahWisata.html', konteks)\n\n\n# def hapus(request,id):\n# hapus = Datawisata.objects.filter(pk=id).delete()\n# return render(request,'halamanutama.html',hapus)\n\n# def edit(request,id):\n# edit = Datawisata.object.get(pk=id)\n# template = edit.html\n# if request.POST :\n# form = FormDataWisata(request.POST, instance=edit)\n# if form.is_valid():\n# form.save()\n# return redirect('edit', id=id)\n# else :\n# form = FormDataWisata(instance=edit)\n# konteks = {\n# 'form' : form,\n# 'edit' : edit,\n# }\n# return render(request, template, konteks)", "sub_path": "wisatajogja/home/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datawisata.models.Datawisata.objects.all", "line_number": 5, "usage_type": "call"}, {"api_name": "datawisata.models.Datawisata.objects", "line_number": 5, "usage_type": "attribute"}, {"api_name": "datawisata.models.Datawisata", "line_number": 5, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "98440126", "text": "import asyncio\nimport hashlib\nfrom dill.source import getsource\nfrom itertools import chain\n\nimport pytest\nfrom yarl import URL\n\nfrom aiobotocore.endpoint import ClientResponseProxy\n\nimport aiohttp\nfrom aiohttp.client import ClientResponse\nimport botocore\nfrom botocore.args import ClientArgsCreator\nfrom botocore.client import ClientCreator, BaseClient, Config\nfrom botocore.endpoint import convert_to_response_dict, Endpoint, \\\n EndpointCreator\nfrom botocore.paginate import PageIterator, ResultKeyIterator\nfrom botocore.session import Session, get_session\nfrom botocore.waiter import NormalizedOperationMethod, Waiter, \\\n create_waiter_with_client\nfrom botocore.eventstream import EventStream\nfrom botocore.parsers import ResponseParserFactory, PROTOCOL_PARSERS, \\\n RestXMLParser, EC2QueryParser, QueryParser, JSONParser, RestJSONParser\nfrom botocore.response import StreamingBody\n\n\n# This file ensures that our private patches will work going forward. If a\n# method gets updated this will assert and someone will need to validate:\n# 1) If our code needs to be updated\n# 2) If our minimum botocore version needs to be updated\n# 3) If we need to replace the below hash (not backwards compatible) or add\n# to the set\n\n# The follow is for our monkeypatches for read_timeout:\n# github.com/aio-libs/aiobotocore/pull/248\n_AIOHTTP_DIGESTS = {\n # for using _body\n ClientResponse: {'e178726065b609c69a1c02e8bb78f22efce90792'},\n}\n\n# These are guards to our main patches\n\n# !!! README: HOW TO UPDATE THESE !!!\n# -----------------------------------\n# (tests break with new version of aiohttp/botocore)\n#\n# 1) Adding support for more versions of aiohttp/botocore\n# In this scenario you need to ensure that aiobotocore supports the changes\n# that broke these tests along with the old versions of the libraries\n# and APPEND to the set of hashes that we support for each object you\n# validated.\n# 2) Bumping up the base version of aiohttp/botocore that we support\n# In this scenario ensure aiobotocore supports the new version of the libs\n# and REPLACE all entries with the current hashes with the new libs.\n\n# REPLACE = backwards incompatible change\n# APPEND = officially supporting more versions of botocore/aiohttp\n\n# If you're changing these, most likely need to update setup.py as well.\n_API_DIGESTS = {\n # args.py\n ClientArgsCreator.get_client_args: {'e3a44e6f50159e8e31c3d76f5e8a1110dda495fa'},\n\n # client.py\n ClientCreator._create_client_class: {'5e493d069eedbf314e40e12a7886bbdbcf194335'},\n ClientCreator._get_client_args: {'555e1e41f93df7558c8305a60466681e3a267ef3'},\n\n BaseClient._make_api_call: {'0c59329d4c8a55b88250b512b5e69239c42246fb'},\n BaseClient._make_request: {'033a386f7d1025522bea7f2bbca85edc5c8aafd2'},\n BaseClient.get_paginator: {'c69885f5f73fae048c0b93b43bbfcd1f9c6168b8'},\n BaseClient.get_waiter: {'23d57598555bfbc4c6e7ec93406d05771f108d9e'},\n\n # config.py\n Config.merge: {'c3dd8c3ffe0da86953ceba4a35267dfb79c6a2c8'},\n Config: {'2dcc44190a3dc2a4b26ab0ed9410daefcd7c93c1'},\n\n # endpoint.py\n convert_to_response_dict: {'2c73c059fa63552115314b079ae8cbf5c4e78da0'},\n\n Endpoint._send_request: {'50ab33d6f16e75594d01ab1c2ec6b7c7903798db'},\n Endpoint._get_response: {'46c3a8cb4ff7672b75193ce5571dbea48aa9da75'},\n Endpoint._do_get_response: {'df29f099d26dc057834c7b25d3b5217f1f7acbe4'},\n Endpoint._needs_retry: {'0f40f52d8c90c6e10b4c9e1c4a5ca00ef2c72850'},\n Endpoint._send: {'644c7e5bb88fecaa0b2a204411f8c7e69cc90bf1'},\n\n 
EndpointCreator.create_endpoint: {'36065caa2398573be229bee500e27303bc362348'},\n\n # eventstream.py\n EventStream._create_raw_event_generator: {\n 'cc101f3ca2bca4f14ccd6b385af900a15f96967b'},\n EventStream.__iter__: {'8a9b454943f8ef6e81f5794d641adddd1fdd5248'},\n\n # paginate.py\n PageIterator.__iter__: {'56b3a1e30f488e2f1f5d5309db42fd5ad8a3895d'},\n PageIterator.result_key_iters: {'04d3c647bd98caba3687df80e650fea517a0068e'},\n PageIterator.build_full_result: {'afe8cd8daad2cf32ae34f877985ab79501bf7742'},\n ResultKeyIterator: {'f71d98959ccda5e05e35cf3cf224fbc9310d33bb'},\n\n # parsers.py\n ResponseParserFactory.create_parser: {'5cf11c9acecd1f60a013f6facbe0f294daa3f390'},\n RestXMLParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n EC2QueryParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n QueryParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n JSONParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n RestJSONParser._create_event_stream: {'0564ba55383a71cc1ba3e5be7110549d7e9992f5'},\n\n # response.py\n StreamingBody: {'bb4d872649b0c118c9a3d5e44961e1bea92eb79c'},\n\n # session.py\n Session.__init__: {'ccf156a76beda3425fb54363f3b2718dc0445f6d'},\n Session.create_client: {'36f4e718fc4bada66808c2f98fa71835c09076f7'},\n get_session: {'c47d588f5da9b8bde81ccc26eaef3aee19ddd901'},\n\n # waiter.py\n NormalizedOperationMethod.__call__: {'79723632d023739aa19c8a899bc2b814b8ab12ff'},\n Waiter.wait: {'5502a89ed740fb5d6238a6f72a3a08efc1a9f43b'},\n create_waiter_with_client: {'c3d12c9a4293105cc8c2ecfc7e69a2152ad564de'},\n}\n\n_PROTOCOL_PARSER_CONTENT = {'ec2', 'query', 'json', 'rest-json', 'rest-xml'}\n\n\n@pytest.mark.moto\ndef test_protocol_parsers():\n # Check that no new parsers have been added\n current_parsers = set(PROTOCOL_PARSERS.keys())\n assert current_parsers == _PROTOCOL_PARSER_CONTENT\n\n\n# NOTE: this doesn't require moto but needs to be marked to run with coverage\n@pytest.mark.moto\ndef test_patches():\n print(\"Botocore version: {} aiohttp version: {}\".format(\n botocore.__version__, aiohttp.__version__))\n\n success = True\n for obj, digests in chain(_AIOHTTP_DIGESTS.items(), _API_DIGESTS.items()):\n digest = hashlib.sha1(getsource(obj).encode('utf-8')).hexdigest()\n if digest not in digests:\n print(\"Digest of {}:{} not found in: {}\".format(\n obj.__qualname__, digest, digests))\n success = False\n\n assert success\n\n\n# NOTE: this doesn't require moto but needs to be marked to run with coverage\n@pytest.mark.moto\n@pytest.mark.asyncio\nasync def test_set_status_code():\n resp = ClientResponseProxy(\n 'GET', URL('http://foo/bar'),\n loop=asyncio.get_event_loop(),\n writer=None, continue100=None, timer=None,\n request_info=None,\n traces=None,\n session=None)\n resp.status_code = 500\n assert resp.status_code == 500\n", "sub_path": "tests/test_patches.py", "file_name": "test_patches.py", "file_ext": "py", "file_size_in_byte": 6532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "aiohttp.client.ClientResponse", "line_number": 39, "usage_type": "name"}, {"api_name": "botocore.args.ClientArgsCreator.get_client_args", "line_number": 63, "usage_type": "attribute"}, {"api_name": "botocore.args.ClientArgsCreator", "line_number": 63, "usage_type": "name"}, {"api_name": "botocore.client.ClientCreator._create_client_class", "line_number": 66, "usage_type": "attribute"}, {"api_name": "botocore.client.ClientCreator", 
"line_number": 66, "usage_type": "name"}, {"api_name": "botocore.client.ClientCreator._get_client_args", "line_number": 67, "usage_type": "attribute"}, {"api_name": "botocore.client.ClientCreator", "line_number": 67, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient._make_api_call", "line_number": 69, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 69, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient._make_request", "line_number": 70, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 70, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient.get_paginator", "line_number": 71, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 71, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient.get_waiter", "line_number": 72, "usage_type": "attribute"}, {"api_name": "botocore.client.BaseClient", "line_number": 72, "usage_type": "name"}, {"api_name": "botocore.client.Config.merge", "line_number": 75, "usage_type": "attribute"}, {"api_name": "botocore.client.Config", "line_number": 75, "usage_type": "name"}, {"api_name": "botocore.client.Config", "line_number": 76, "usage_type": "name"}, {"api_name": "botocore.endpoint.convert_to_response_dict", "line_number": 79, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._send_request", "line_number": 81, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 81, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._get_response", "line_number": 82, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 82, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._do_get_response", "line_number": 83, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 83, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._needs_retry", "line_number": 84, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 84, "usage_type": "name"}, {"api_name": "botocore.endpoint.Endpoint._send", "line_number": 85, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.Endpoint", "line_number": 85, "usage_type": "name"}, {"api_name": "botocore.endpoint.EndpointCreator.create_endpoint", "line_number": 87, "usage_type": "attribute"}, {"api_name": "botocore.endpoint.EndpointCreator", "line_number": 87, "usage_type": "name"}, {"api_name": "botocore.eventstream.EventStream._create_raw_event_generator", "line_number": 90, "usage_type": "attribute"}, {"api_name": "botocore.eventstream.EventStream", "line_number": 90, "usage_type": "name"}, {"api_name": "botocore.eventstream.EventStream.__iter__", "line_number": 92, "usage_type": "attribute"}, {"api_name": "botocore.eventstream.EventStream", "line_number": 92, "usage_type": "name"}, {"api_name": "botocore.paginate.PageIterator.__iter__", "line_number": 95, "usage_type": "attribute"}, {"api_name": "botocore.paginate.PageIterator", "line_number": 95, "usage_type": "name"}, {"api_name": "botocore.paginate.PageIterator.result_key_iters", "line_number": 96, "usage_type": "attribute"}, {"api_name": "botocore.paginate.PageIterator", "line_number": 96, "usage_type": "name"}, {"api_name": "botocore.paginate.PageIterator.build_full_result", "line_number": 97, "usage_type": "attribute"}, {"api_name": "botocore.paginate.PageIterator", "line_number": 97, "usage_type": "name"}, {"api_name": "botocore.paginate.ResultKeyIterator", 
"line_number": 98, "usage_type": "name"}, {"api_name": "botocore.parsers.ResponseParserFactory.create_parser", "line_number": 101, "usage_type": "attribute"}, {"api_name": "botocore.parsers.ResponseParserFactory", "line_number": 101, "usage_type": "name"}, {"api_name": "botocore.parsers.RestXMLParser._create_event_stream", "line_number": 102, "usage_type": "attribute"}, {"api_name": "botocore.parsers.RestXMLParser", "line_number": 102, "usage_type": "name"}, {"api_name": "botocore.parsers.EC2QueryParser._create_event_stream", "line_number": 103, "usage_type": "attribute"}, {"api_name": "botocore.parsers.EC2QueryParser", "line_number": 103, "usage_type": "name"}, {"api_name": "botocore.parsers.QueryParser._create_event_stream", "line_number": 104, "usage_type": "attribute"}, {"api_name": "botocore.parsers.QueryParser", "line_number": 104, "usage_type": "name"}, {"api_name": "botocore.parsers.JSONParser._create_event_stream", "line_number": 105, "usage_type": "attribute"}, {"api_name": "botocore.parsers.JSONParser", "line_number": 105, "usage_type": "name"}, {"api_name": "botocore.parsers.RestJSONParser._create_event_stream", "line_number": 106, "usage_type": "attribute"}, {"api_name": "botocore.parsers.RestJSONParser", "line_number": 106, "usage_type": "name"}, {"api_name": "botocore.response.StreamingBody", "line_number": 109, "usage_type": "name"}, {"api_name": "botocore.session.Session.__init__", "line_number": 112, "usage_type": "attribute"}, {"api_name": "botocore.session.Session", "line_number": 112, "usage_type": "name"}, {"api_name": "botocore.session.Session.create_client", "line_number": 113, "usage_type": "attribute"}, {"api_name": "botocore.session.Session", "line_number": 113, "usage_type": "name"}, {"api_name": "botocore.session.get_session", "line_number": 114, "usage_type": "name"}, {"api_name": "botocore.waiter.NormalizedOperationMethod.__call__", "line_number": 117, "usage_type": "attribute"}, {"api_name": "botocore.waiter.NormalizedOperationMethod", "line_number": 117, "usage_type": "name"}, {"api_name": "botocore.waiter.Waiter.wait", "line_number": 118, "usage_type": "attribute"}, {"api_name": "botocore.waiter.Waiter", "line_number": 118, "usage_type": "name"}, {"api_name": "botocore.waiter.create_waiter_with_client", "line_number": 119, "usage_type": "name"}, {"api_name": "botocore.parsers.PROTOCOL_PARSERS.keys", "line_number": 128, "usage_type": "call"}, {"api_name": "botocore.parsers.PROTOCOL_PARSERS", "line_number": 128, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 125, "usage_type": "attribute"}, {"api_name": "botocore.__version__", "line_number": 136, "usage_type": "attribute"}, {"api_name": "aiohttp.__version__", "line_number": 136, "usage_type": "attribute"}, {"api_name": "itertools.chain", "line_number": 139, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 140, "usage_type": "call"}, {"api_name": "dill.source.getsource", "line_number": 140, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 133, "usage_type": "attribute"}, {"api_name": "aiobotocore.endpoint.ClientResponseProxy", "line_number": 153, "usage_type": "call"}, {"api_name": "yarl.URL", "line_number": 154, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 155, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 151, "usage_type": "attribute"}]}
+{"seq_id": "611345819", "text": "from bot_detection import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_selection import RFE\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import precision_recall_fscore_support,precision_score,recall_score,f1_score\nfrom sklearn.preprocessing import MinMaxScaler \nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ndef scale(df):\n X = df.loc[:,:]\n scaler=StandardScaler()\n names=X.columns\n scaled_df = scaler.fit_transform(X) #our real time input\n scaled_df=pd.DataFrame(scaled_df,columns=names)\n return scaled_df\n\n\ndef model_eval(X,y,result):\n X_train, X_test, y_train, y_test = train_test_split(X,np.ravel(y), test_size=0.25,random_state=25)\n model=LogisticRegression(solver='liblinear')\n model.fit(X_train,y_train)\n print(\"----------------Logistic Regression-------------------\")\n p=model.predict(X_test)\n ans=X_test.copy()\n ans[\"Actual\"]=y_test\n ans[\"Predicted\"]=p\n ans1=ans[ans['Actual']==0]\n y_true=ans1[\"Actual\"]\n y_predict=ans1[\"Predicted\"]\n prf=precision_recall_fscore_support(y_true, y_predict, average='micro')\n #print(X_test)\n print('Accuracy : {:.2f}'.format(model.score(X_test, y_test)))\n print('Precision :',precision_score(y_test,p))\n print('Recall :',recall_score(y_test,p))\n print('F1-score :',f1_score(y_test,p))\n print(\"----------------Logisticc Regression-------------------\")\n\n\n\ndef svm(x,y,result):\n model=SVC()\n X_train, X_test, y_train, y_test = train_test_split(x,np.ravel(y), test_size=0.25,random_state=15)\n model.fit(X_train,y_train)\n print('\\n\\n\\n')\n print(\"----------------SVM-------------------------\")\n p=model.predict(X_test)\n ans=X_test.copy()\n ans[\"Actual\"]=y_test\n ans[\"Predicted\"]=p\n ans1=ans[ans['Actual']==0]\n y_true=ans1[\"Actual\"]\n y_predict=ans1[\"Predicted\"]\n prf=precision_recall_fscore_support(y_true, y_predict, average='micro')\n print('Accuracy : {:.2f}'.format(model.score(X_test, y_test)))\n print('Precision :',precision_score(y_test,p))\n print('Recall :',recall_score(y_test,p))\n print('F1-score :',f1_score(y_test,p))\n print(\"----------------SVM-------------------------\")\n #predict_svm=model.predict(result)\n #print(\"predicted by svm \", predict_svm)\n\n\ndef random_forest(x,y,result):\n rf_model = RandomForestClassifier(n_estimators=100, # Number of trees 100 default\n # max_features=2, # Num features considered ,\n oob_score=True,\n min_samples_split=10, #min no of samples required to split the tree\n max_depth=100, #default is none or untill min_samples condition is met\n max_features='auto',\n # bootstrap=False # if false, whole dataset is used for each tree\n )\n X_train, X_test, y_train, y_test = train_test_split(x,np.ravel(y), test_size=0.25,random_state=15)\n rf_model.fit(X_train,y_train)\n print('\\n')\n print(\"----------------Random forest-------------------\")\n p=rf_model.predict(X_test)\n ans=X_test.copy()\n ans[\"Actual\"]=y_test\n ans[\"Predicted\"]=p\n# print(ans)\n ans1=ans[ans['Actual']==0]\n y_true=ans1[\"Actual\"]\n y_predict=ans1[\"Predicted\"]\n prf=precision_recall_fscore_support(y_test, p, average='micro')\n #print(X_test)\n 
print('Accuracy : {:.2f}'.format(rf_model.score(X_test, y_test)))\n print('Precision :',precision_score(y_test,p))\n print('Recall :',recall_score(y_test,p))\n print('F1-score :',f1_score(y_test,p))\n print(\"----------------Random forest-------------------\")\n prediction=rf_model.predict(result)\n print(\"\\nPrediction : \",prediction[0])\n if(prediction[0]==1):\n user='Spam'\n elif(prediction[0]==0):\n user='Not Spam'\n print(\"Final User Result : \",user)\n #print('Accuracy : '.accuracy_score(y_test,predict1))\n #print(\"predicted by random forest \", predict1)\n\ndef rfe_model(df,y,result):\n X_train, X_test, y_train, y_test = train_test_split(df,np.ravel(y), test_size=0.25,random_state=25)\n model=LogisticRegression(solver='liblinear')\n rfe=RFE(model,18)\n rfe = rfe.fit(X_train,y_train)\n print(rfe.support_)\n print(rfe.ranking_)\n cols=[\"description\",\"verified\",\"age\",\"followers\",\"mentions\",\"url_ratio\",\"hashtag\",\"retweet_rate\",\"mean_of_intertweet_delay\",\"SD\",\"avg_tweets_day\",\"avg_tweeets_week\"]\n new_df=pd.DataFrame()\n for col in cols:\n new_df[col]=df[col]\n X_train, X_test, y_train, y_test = train_test_split(new_df,np.ravel(y), test_size=0.25,random_state=25)\n model=LogisticRegression(solver='liblinear')\n model.fit(X_train,y_train)\n model_eval(new_df,y,result)\n svm(new_df,y,result)\n random_forest(new_df,y,result)\n\n\n#__main__\ndata=pd.read_csv('tweet_info.txt',sep=\" \",header=None)\ndata.columns=[\"description\",\"verified\",\"age\",\"following\",\"followers\",\"reputation\",\"mentions\",\"unique_mentions\",\"url_ratio\",\"hashtag\",\"content_similarity\",\"retweet_rate\",\"reply_rate\",\"no_of_tweets\",\"mean_of_intertweet_delay\",\"SD\",\"avg_tweets_day\",\"avg_tweeets_week\",\"s1\",\"s2\",\"fofo\",\"following_rate\",\"0-3\",\"3-6\",\"6-9\",\"9-12\",\"12-15\",\"15-18\",\"18-21\",\"21-24\"]\nlabel=pd.read_csv(\"label.txt\",header=None)\ndata[\"label\"]=label\n\nprint(data)\n\nX = data.loc[:, data.columns != 'label']\ny = data.loc[:, data.columns == 'label']\n\n\nscaler=StandardScaler()\ncols=[\"description\",\"verified\",\"age\",\"followers\",\"mentions\",\"url_ratio\",\"hashtag\",\"retweet_rate\",\"mean_of_intertweet_delay\",\"SD\",\"avg_tweets_day\",\"avg_tweeets_week\"]\nnew_df=pd.DataFrame()\nfor col in cols:\n new_df[col]=X[col]\nprint(new_df)\n\nrfe_model(new_df,y,extract_df)\n\nscaled_df = scaler.fit_transform(new_df)\nscaled_df=pd.DataFrame(scaled_df,columns=cols)\nresult=scaler.transform(extract_df)\nprint(\"\\n\\nAfter Scaling\\nresult is \", result)\nrfe_model(scaled_df,y,result)\n\n#def svm(x,y):\n# model=SVC()\n# X_train, X_test, y_train, y_test = train_test_split(x,np.ravel(y), test_size=0.25,random_state=15)\n# model.fit(X_train,y_train)\n# pred = model.predict(X_test)\n# print(confusion_matrix(y_test, pred))\n# print(classification_report(y_test, pred))\n\n#svm(scaled_df,np.ravel(y))\n\n#random_forest(scaled_df,np.ravel(y),result)\n\n#svm(new_df,np.ravel(y))\n\n#%matplotlib inline\nmatplotlib.style.use('ggplot')\nnp.random.seed(1)\ndf = pd.DataFrame({\n 'logistic_reg': np.random.normal(0, 2, 10000),\n 'svm': np.random.normal(5, 3, 10000),\n 'random_forest': np.random.normal(-5, 5, 10000)\n})\n\nscaler = preprocessing.StandardScaler()\nscaled_df = scaler.fit_transform(df)\nscaled_df = pd.DataFrame(scaled_df, columns=['logistic_reg', 'svm', 'random_forest'])\nimport seaborn as sns\nfig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 5))\nax1.set_title('Before Scaling')\nsns.kdeplot(df['logistic_reg'], ax=ax1)\nsns.kdeplot(df['svm'], 
ax=ax1)\nsns.kdeplot(df['random_forest'], ax=ax1)\nax2.set_title('After Scaling')\nsns.kdeplot(scaled_df['logistic_reg'], ax=ax2)\nsns.kdeplot(scaled_df['svm'], ax=ax2)\nsns.kdeplot(scaled_df['random_forest'], ax=ax2)\nplt.show()\n\n\n", "sub_path": "twitter_analysis_final.py", "file_name": "twitter_analysis_final.py", "file_ext": "py", "file_size_in_byte": 7540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 82, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 144, "usage_type": "call"}, 
{"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.style.use", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 180, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "seaborn.kdeplot", "line_number": 186, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 187, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 188, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 190, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 191, "usage_type": "call"}, {"api_name": "seaborn.kdeplot", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}]}
+{"seq_id": "496499434", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 8 19:49:25 2021\n\n@author: user\n\"\"\"\nimport keras\nfrom keras.layers import Input,Lambda,Dense,Flatten\nfrom keras.models import Model\n#from keras.applications.vgg19 import VGG19\nfrom keras.applications.xception import Xception as xc\nfrom keras_applications.xception import preprocess_input\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nimport numpy as np\nfrom glob import glob\n#spacifying the path of images\ntrain_path='Datasets/train'\ntest_path='Datasets/test'\n\nIMAGW_SIZE=[224,224]\n#creating the obgetct of resnet50 class\nxcep=xc(input_shape=IMAGW_SIZE+[3],weights='imagenet',include_top=False)\nfor layers in xcep.layers:\n layers.trainable=False\n \n \n \nfolder=glob('Datasets/train/*') \nx=Flatten()(xcep.output)\nprediction=Dense(2,activation='softmax')(x)\nmodel=Model(inputs=xcep.input, outputs=prediction)\n\n\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\ntrain_datgen=ImageDataGenerator(rescale=1./255,zoom_range=0.2,shear_range=0.2,horizontal_flip=True)\ntest_datagen=ImageDataGenerator(rescale=1./255)\n\n\n\ntrainset=train_datgen.flow_from_directory('Datasets/train',\n target_size=(224,224),\n batch_size=32,\n class_mode='categorical')\ntestset=test_datagen.flow_from_directory('Datasets/test',\n target_size=(224,224),\n batch_size=32,\n class_mode='categorical')\n\n\nt=model.fit_generator(trainset,\n validation_data=testset,\n epochs=2,\n steps_per_epoch=len(trainset),\n validation_steps=len(testset)\n )\n\n\n\n\n\n\nimport tensorflow as tf\nfrom keras.models import load_model\n\n\nmodel.save('face_detection_model.h5')\n\n\n\n\n\n", "sub_path": "face_detection_model.py", "file_name": "face_detection_model.py", "file_ext": "py", "file_size_in_byte": 2037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.applications.xception.Xception", "line_number": 24, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "533778675", "text": "import os\nimport torch\n\nfrom skimage import io, transform\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport math\nfrom cropextract import *\nfrom torch.utils.data import Dataset, DataLoader\nimport scipy.misc as smi\nimport imageio\n\nimport scipy.io as sio\nimport scipy.misc as smi\nimport pickle\nfrom matplotlib import pyplot as plt\n\nclass PlanarPatchDataset(Dataset):\n def __init__(self, csv_file, root_dir, transform=None):\n f = open(os.path.join(root_dir, 'train.pkl'), 'rb')\n self.landmarks_frame = pickle.load(f)\n self.root_dir = root_dir\n self.transform = transform\n\n def __len__(self):\n return len(self.landmarks_frame)\n #return 256\n\n def __getitem__(self, idx):\n\n a = str(self.landmarks_frame[idx][1][0])\n p = str(self.landmarks_frame[idx][1][1])\n n = str(self.landmarks_frame[idx][1][2])\n\n rgb_anchor = os.path.join(self.root_dir, 'images/'+a+'.rgb.npy')\n normal_anchor = os.path.join(self.root_dir, 'images/'+a+'.n.npy')\n mask_anchor = os.path.join(self.root_dir, 'planes/' + self.landmarks_frame[idx][0][0]+'.npy')\n\n rgb_positive = os.path.join(self.root_dir, 'images/'+ p + '.rgb.npy')\n normal_positive = os.path.join(self.root_dir, 'images/'+ p + '.n.npy')\n mask_positive = os.path.join(self.root_dir, 'planes/' + self.landmarks_frame[idx][0][1]+'.npy')\n\n rgb_negative = os.path.join(self.root_dir, 'images/'+n + '.rgb.npy')\n normal_negative = os.path.join(self.root_dir, 'images/'+n + '.n.npy')\n mask_negative = os.path.join(self.root_dir, 'planes/' + self.landmarks_frame[idx][0][2]+'.npy')\n\n\n\n\n size = (240, 320)\n # rgb_anchor_image = cv2.resize(imageio.imread(rgb_anchor), dsize=size, interpolation=cv2.INTER_LINEAR)\n # normal_anchor_image = cv2.resize(imageio.imread(normal_anchor), dsize=size, interpolation=cv2.INTER_LINEAR)\n # mask_anchor_image = cv2.resize(imageio.imread(mask_anchor), dsize=size, interpolation=cv2.INTER_LINEAR)\n # rgb_negative_image = cv2.resize(imageio.imread(rgb_negative), dsize=size, interpolation=cv2.INTER_LINEAR)\n # normal_negative_image = cv2.resize(imageio.imread(normal_negative), dsize=size, interpolation=cv2.INTER_LINEAR)\n # mask_negative_image = cv2.resize(imageio.imread(mask_negative), dsize=size, interpolation=cv2.INTER_LINEAR)\n # rgb_positive_image = cv2.resize(imageio.imread(rgb_positive), dsize=size, interpolation=cv2.INTER_LINEAR)\n # normal_positive_image = cv2.resize(imageio.imread(normal_positive), dsize=size, interpolation=cv2.INTER_LINEAR)\n # mask_positive_image = cv2.resize(imageio.imread(mask_positive), dsize=size, interpolation=cv2.INTER_LINEAR)\n\n rgb_anchor_image = np.load(rgb_anchor)\n normal_anchor_image = np.load(normal_anchor)\n mask_anchor_image = np.load(mask_anchor)\n rgb_negative_image = np.load(rgb_negative)\n normal_negative_image = np.load(normal_negative)\n mask_negative_image = np.load(mask_negative)\n rgb_positive_image = np.load(rgb_positive)\n normal_positive_image = np.load(normal_positive)\n mask_positive_image = np.load(mask_positive)\n\n # m = 0.1 * np.stack((mask_anchor_image.astype(np.uint8), np.zeros(size), np.zeros(size)) , axis=-1)\n # plt.imshow( normal_anchor_image + m.astype(np.int))\n # plt.show()\n #\n # m = 0.1 * np.stack((mask_positive_image.astype(np.uint8), np.zeros(size), np.zeros(size)), axis=-1)\n # plt.imshow(normal_positive_image + m.astype(np.int))\n # plt.show()\n #\n # m = 0.1 * np.stack((mask_negative_image.astype(np.uint8), np.zeros(size), np.zeros(size)), axis=-1)\n # plt.imshow(normal_negative_image + 
m.astype(np.int))\n # plt.show()\n\n #plt.imshow(np.ma.array(mask_anchor_image, mask=~mask_anchor_image))\n\n\n sample = {\n 'rgb_anchor_image': rgb_anchor_image,\n 'normal_anchor_image': normal_anchor_image,\n 'mask_anchor_image': mask_anchor_image,\n 'rgb_negative_image': rgb_negative_image,\n 'normal_negative_image': normal_negative_image,\n 'mask_negative_image': mask_negative_image,\n 'rgb_positive_image': rgb_positive_image,\n 'normal_positive_image': normal_positive_image,\n 'mask_positive_image': mask_positive_image\n }\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n rgb_anchor_image,\\\n normal_anchor_image,\\\n mask_anchor_image,\\\n rgb_negative_image,\\\n normal_negative_image,\\\n mask_negative_image,\\\n rgb_positive_image,\\\n normal_positive_image,\\\n mask_positive_image = \\\n sample['rgb_anchor_image'],\\\n sample['normal_anchor_image'],\\\n sample['mask_anchor_image'],\\\n sample['rgb_negative_image'],\\\n sample['normal_negative_image'],\\\n sample['mask_negative_image'],\\\n sample['rgb_positive_image'],\\\n sample['normal_positive_image'],\\\n sample['mask_positive_image']\n\n\n\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n\n rgb_anchor_image = rgb_anchor_image.astype(np.float).transpose((2, 0, 1)) - 128\n rgb_negative_image = rgb_negative_image.astype(np.float).transpose((2, 0, 1)) - 128\n rgb_positive_image = rgb_positive_image.astype(np.float).transpose((2, 0, 1)) - 128\n\n normal_anchor_image = normal_anchor_image.astype(np.float).transpose((2, 0, 1)) - 128\n normal_negative_image = normal_negative_image.astype(np.float).transpose((2, 0, 1)) - 128\n normal_positive_image = normal_positive_image.astype(np.float).transpose((2, 0, 1)) - 128\n\n mask_anchor_image = mask_anchor_image.astype(np.float) - 128\n mask_negative_image = mask_negative_image.astype(np.float) - 128\n mask_positive_image = mask_positive_image.astype(np.float) - 128\n\n return {\n 'rgb_anchor_image': torch.from_numpy(rgb_anchor_image),\n 'normal_anchor_image': torch.from_numpy(normal_anchor_image),\n 'mask_anchor_image': torch.from_numpy(mask_anchor_image),\n 'rgb_negative_image': torch.from_numpy(rgb_negative_image),\n 'normal_negative_image': torch.from_numpy(normal_negative_image),\n 'mask_negative_image': torch.from_numpy(mask_negative_image),\n 'rgb_positive_image': torch.from_numpy(rgb_positive_image),\n 'normal_positive_image': torch.from_numpy(normal_positive_image),\n 'mask_positive_image': torch.from_numpy(mask_positive_image)\n }\n\n", "sub_path": "dataset_planematch.py", "file_name": "dataset_planematch.py", "file_ext": "py", "file_size_in_byte": 6930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 22, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 155, "usage_type": "call"}]}
+{"seq_id": "519944723", "text": "import threading\nimport time\nimport modbus_poller\n#import socket\nimport PID\nimport socketio\nimport datetime\nimport pandas as pd\n\ndef run_item(f, item):\n result_info = [threading.Event(), None]\n def runit():\n result_info[1] = f(item)\n result_info[0].set()\n threading.Thread(target=runit).start()\n return result_info\n\ndef gather_results(result_infos):\n results = [] \n for i in range(len(result_infos)):\n result_infos[i][0].wait()\n results.append(result_infos[i][1])\n return results\n\nclass ModbusValue:\n def __init__(self, name, unitId, registers, scalefactor):\n self.name = name\n self.unitId = unitId\n self.registers = registers\n self.scalefactor = scalefactor\n\ndef proc(item):\n c = modbus_poller.initModbusDevice('84.9.41.168', 502, item.unitId, True)\n val = modbus_poller.getHoldingRegisterValue(c, item.registers[0], 1, item.scalefactor)\n return modbus_poller.convertToSignedInt(val[0])/item.scalefactor\n\n#database connection details\n# HOST = 'localhost'\n# PORT = 9009\n# sock= socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Initialise WebSocket\nsio = socketio.Client()\n\n@sio.on('my message')\ndef on_message(data):\n print('I received a message!')\n\n@sio.event\ndef connect():\n print(\"I'm connected!\")\n\n@sio.event\ndef connect_error(data):\n print(\"The connection failed!\")\n\n@sio.event\ndef disconnect():\n print(\"I'm disconnected!\")\n\nsio.connect('http://localhost:8080')\nprint('my sid is', sio.sid)\n\n#pid parameters\nP = 1.4\nI = 1\nD = 0.001\npid = PID.PID(P,I,D)\n\n#initialise system parameters\nsolar_inverter = ModbusValue('solar_inverter_W', 32, [1029], 1)\nsolar_charger = ModbusValue('solar_charger_W', 245, [789], 10)\nac_loads = ModbusValue('ac_loads_W', 100, [817], 1)\nsoc = ModbusValue('soc_W', 100, [843], 1)\nchargeState = ModbusValue('soc_W', 100, [844], 1)\ngrid = ModbusValue('grid_W', 31, [2600], 1)\nreal_grid_sp = ModbusValue('real_grid_sp_W', 246, [37], 1)\nbattery = ModbusValue('battery', 100, [842], 1)\nbatteryVoltage = ModbusValue('battery', 100, [840], 10)\nbatteryCurrent = ModbusValue('battery', 100, [841], 10)\nbatteryTemperature = ModbusValue('battery', 225, [262], 10)\n\n\nmodbusDevices = [solar_inverter, solar_charger, ac_loads, grid, real_grid_sp, \nsoc, chargeState, battery, batteryVoltage, batteryCurrent, batteryTemperature]\nmodbusValues = gather_results([run_item(proc, item) for item in modbusDevices])\n\npropertyLoadW = modbusValues[2]\npVGenerationW = modbusValues[0] + modbusValues[1]\nsetPointW = propertyLoadW - pVGenerationW\n\nfeedback = 0\npid.SetPoint = setPointW\npid.setSampleTime(0.01)\n\ntry:\n #connect to database\n #sock.connect((HOST, PORT))\n \n i = 0\n while True:\n i = i + 1\n modbusValues = gather_results([run_item(proc, item) for item in modbusDevices])\n propertyLoadW = modbusValues[2] \n pVGenerationW = modbusValues[0] + modbusValues[1]\n setPointW = propertyLoadW - pVGenerationW\n \n pid.update(feedback)\n output = pid.output\n \n feedback += (output - (1 / i))\n \n pid.SetPoint = setPointW\n \n #push readings to database\n #dataStr = gridImportExportW_real=' + str(modbusValues[3])\n \n timeNow = str(datetime.datetime.now())\n dataStr = { 'sensor': [\n { 'name': 'propertyLoad', 'point': { 'timestamp': timeNow, 'value': str(propertyLoadW) } },\n { 'name': 'pVGeneration', 'point': { 'timestamp': timeNow, 'value': str(pVGenerationW) } },\n { 'name': 'setPointSimulated', 'point': { 'timestamp': timeNow, 'value': str(setPointW) } },\n { 'name': 'setPointReal', 'point': 
{ 'timestamp': timeNow, 'value': str(modbusValues[4]) } },\n { 'name': 'gridSimulated', 'point': { 'timestamp': timeNow, 'value': str(feedback) } },\n { 'name': 'gridReal', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[3]) } },\n { 'name': 'soc', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[5]) } },\n { 'name': 'chargeState', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[6]) }},\n { 'name': 'battery', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[7]) }},\n { 'name': 'batteryVoltage', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[8]) }},\n { 'name': 'batteryCurrent', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[9]) }},\n { 'name': 'batteryTemperature', 'point': { 'timestamp': timeNow, 'value': str(modbusValues[10]) }}\n ]} \n\n\n currentTime= datetime.datetime.now()\n sio.emit('my message', {'data': dataStr })\n \n #sock.sendall(dataStr.encode())\n time.sleep(1)\n \nexcept Exception as e:\n print(\"Got error: %s\" % (e))\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#simulated_grid_sp = ModbusDevice()\n#print(gather_results([run_item(proc, item) for item in [1, 2, 10, 100]]))\n", "sub_path": "Contoller/real-time-graph.py", "file_name": "real-time-graph.py", "file_ext": "py", "file_size_in_byte": 4789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "threading.Event", "line_number": 11, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 15, "usage_type": "call"}, {"api_name": "modbus_poller.initModbusDevice", "line_number": 33, "usage_type": "call"}, {"api_name": "modbus_poller.getHoldingRegisterValue", "line_number": 34, "usage_type": "call"}, {"api_name": "modbus_poller.convertToSignedInt", "line_number": 35, "usage_type": "call"}, {"api_name": "socketio.Client", "line_number": 43, "usage_type": "call"}, {"api_name": "PID.PID", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 139, "usage_type": "call"}]}
+{"seq_id": "185650775", "text": "\nfrom modules.PreactDoubleLayer import PreactDoubleLayer\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import *\nimport torch.optim as optim\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nvFeat = [4, 4]\nnChan = vFeat[0]\nnClasses = 5\n\n# random batch\nx = normalInit([10, nChan, 32, 32]).to(device) # (numImages, numChans, image height, image width)\nW = normalInit([nChan + 1, nClasses]).to(device) # plus one for the bias\nlabels = torch.LongTensor([1, 2, 3, 4, 3, 2, 1, 0, 2, 3]).to(device)\nKconnect = normalInit([nChan, nChan, 1, 1]).to(device)\n\n# ----------------------------------------------------------------------\n# new approach\nparamsStruct = {'normLayer1': nn.BatchNorm2d(num_features=nChan),\n 'normLayer2': nn.BatchNorm2d(num_features=nChan),\n 'conv1': nn.Conv2d(in_channels=nChan, out_channels=nChan, kernel_size=3, padding=1, stride=1),\n 'conv2': nn.Conv2d(in_channels=nChan, out_channels=nChan, kernel_size=3, padding=1, stride=1)}\n\nnet = PreactDoubleLayer(vFeat, params=paramsStruct)\nnet.to(device)\norigK1 = net.conv1.weight.data.clone() # for old method\norigK2 = net.conv2.weight.data.clone() # for old method\nK1 = nn.Parameter(origK1.clone())\nK2 = nn.Parameter(origK2.clone())\noptimizer = optim.SGD(net.parameters(), lr=1e-1, momentum=0.9, weight_decay=0, nesterov=False)\n\noptimizer.zero_grad()\ny1 = net.forward(x)\ny1 = F.avg_pool2d(y1, x.shape[2:4])\n\nloss1, _ = misfitW(y1, W, labels, device)\nloss1.backward()\noptimizer.step()\n\n# ----------------------------------------------------------------------\ndef compareFunc(x, K1, K2 ,device): # functional Preactivated DoubleLayer\n z = F.relu(x)\n z = conv3x3(z, K1)\n z = F.batch_norm(z, running_mean=torch.zeros(K1.size(0) ,device=device),\n running_var=torch.ones(K1.size(0) ,device=device), training=True)\n z = F.relu(z)\n z = conv3x3(z, K2)\n z = F.batch_norm(z, running_mean=torch.zeros(K2.size(0), device=device),\n running_var=torch.ones(K2.size(0), device=device), training=True)\n return z\n# old method\noptimParams = [{'params': K1}, {'params':K2}]\nnWeights = 0\noptimizer = optim.SGD(optimParams, lr=1e-1, momentum=0.9, weight_decay=0, nesterov=False)\n\noptimizer.zero_grad()\n\ny2 = compareFunc(x, K1,K2, device)\n\ny2 = F.avg_pool2d(y2, x.shape[2:4])\n\nloss2, _ = misfitW(y2, W, labels, device)\nloss2.backward()\noptimizer.step()\n\n# ----------------------------------------------------------------------\n\n# print('layer 2-norm difference:', torch.norm(y2 - y1, p=2).data) # want = 0\n# print('loss 2-norm difference: ', torch.norm(loss2 - loss1, p=2).data) # want = 0\n# print('K1 2-norm difference:', torch.norm(net.conv1.weight.data - K1.data, p=2).data) # want = 0\n# print('K2 2-norm difference:', torch.norm(net.conv2.weight.data - K2.data, p=2).data) # want = 0\n# print('K1 update: ',torch.norm(origK1 - K1.data, p=2).data) # want > 0\n# print('K2 update: ',torch.norm(origK2 - K2.data, p=2).data) # want > 0\n\ntol = 1e-5\nassert(torch.norm(y2 - y1, p=2).data < tol)\nassert(torch.norm(loss2 - loss1, p=2).data < tol)\nassert( torch.norm(net.conv1.weight.data - K1.data, p=2).data < tol )\nassert( torch.norm(net.conv2.weight.data - K2.data, p=2).data < tol )\n\nassert( torch.norm(origK1 - K1.data, p=2).data > 1e-4)\nassert( torch.norm(origK2 - K2.data, p=2).data > 1e-4)\nprint('tests passed')\n", "sub_path": "modules/testPreactDoubleLayer.py", "file_name": "testPreactDoubleLayer.py", "file_ext": "py", 
"file_size_in_byte": 3521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.device", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "modules.PreactDoubleLayer.PreactDoubleLayer", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.functional.batch_norm", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.functional.batch_norm", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool2d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "447220802", "text": "import copy\nimport os\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import connections\nfrom django.test.client import RequestFactory, Client\n\ntry:\n from django.test.testcases import LiveServerThread\n LIVE_SERVER_SUPPORT = True\nexcept ImportError:\n LIVE_SERVER_SUPPORT = False\n\n\ndef pytest_funcarg__client(request):\n \"\"\"\n Returns a Django test client instance.\n \"\"\"\n return Client()\n\n\ndef pytest_funcarg__admin_client(request):\n \"\"\"\n Returns a Django test client logged in as an admin user.\n \"\"\"\n\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n user = User.objects.create_user('admin', 'admin@example.com',\n 'password')\n user.is_staff = True\n user.is_superuser = True\n user.save()\n\n client = Client()\n client.login(username='admin', password='password')\n\n return client\n\n\ndef pytest_funcarg__rf(request):\n \"\"\"\n Returns a RequestFactory instance.\n \"\"\"\n return RequestFactory()\n\n\ndef pytest_funcarg__settings(request):\n \"\"\"\n Returns a Django settings object that restores any changes after the test\n has been run.\n \"\"\"\n old_settings = copy.deepcopy(settings)\n\n def restore_settings():\n for setting in dir(old_settings):\n if setting == setting.upper():\n setattr(settings, setting, getattr(old_settings, setting))\n request.addfinalizer(restore_settings)\n return settings\n\n\nclass LiveServer(object):\n def __init__(self, host, possible_ports):\n\n connections_override = {}\n\n for conn in connections.all():\n # If using in-memory sqlite databases, pass the connections to\n # the server thread.\n if (conn.settings_dict['ENGINE'] == 'django.db.backends.sqlite3'\n and conn.settings_dict['NAME'] == ':memory:'):\n # Explicitly enable thread-shareability for this connection\n conn.allow_thread_sharing = True\n connections_override[conn.alias] = conn\n\n self.thread = LiveServerThread(host, possible_ports, connections_override)\n self.thread.daemon = True\n self.thread.start()\n\n self.thread.is_ready.wait()\n\n if self.thread.error:\n raise self.thread.error\n\n def __unicode__(self):\n return 'http://%s:%s' % (self.thread.host, self.thread.port)\n\n def __repr__(self):\n return '
<LiveServer listening at %s>' % unicode(self)\n\n    def __add__(self, other):\n        # Support string concatenation\n        return unicode(self) + other\n\n\ndef get_live_server_host_ports():\n    # This code is copy-pasted from django/test/testcases.py\n\n    specified_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')\n\n    # The specified ports may be of the form '8000-8010,8080,9200-9300'\n    # i.e. a comma-separated list of ports or ranges of ports, so we break\n    # it down into a detailed list of all possible ports.\n    possible_ports = []\n    try:\n        host, port_ranges = specified_address.split(':')\n        for port_range in port_ranges.split(','):\n            # A port range can be of either form: '8000' or '8000-8010'.\n            extremes = map(int, port_range.split('-'))\n            assert len(extremes) in [1, 2]\n            if len(extremes) == 1:\n                # Port range of the form '8000'\n                possible_ports.append(extremes[0])\n            else:\n                # Port range of the form '8000-8010'\n                for port in range(extremes[0], extremes[1] + 1):\n                    possible_ports.append(port)\n    except Exception:\n        raise Exception('Invalid address (\"%s\") for live server.' % specified_address)\n\n    return (host, possible_ports)\n\n\ndef pytest_funcarg__live_server(request):\n    if not LIVE_SERVER_SUPPORT:\n        raise Exception('The kwarg liveserver is not supported in Django <= 1.3')\n\n    def setup_live_server():\n        return LiveServer(*get_live_server_host_ports())\n\n    def teardown_live_server(live_server):\n        live_server.thread.join()\n\n    return request.cached_setup(setup=setup_live_server, teardown=teardown_live_server, scope='session')\n", "sub_path": "pytest_django/funcargs.py", "file_name": "funcargs.py", "file_ext": "py", "file_size_in_byte": 4241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "django.test.client.Client", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 29, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 30, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 31, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 37, "usage_type": "call"}, {"api_name": "django.test.client.RequestFactory", "line_number": 47, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 55, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.connections.all", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 70, "usage_type": "name"}, {"api_name": "django.test.testcases.LiveServerThread", "line_number": 79, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 102, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 102, "usage_type": "attribute"}]}
+{"seq_id": "189718976", "text": "# -*- coding: utf-8 -*-\nimport os\n\nimport xlrd\nimport xlsxwriter\n\nexcel_path = 'p_files'\n\n\ndef load_p_data():\n p_scores = []\n for root, dir, files in os.walk(excel_path):\n for item in files:\n p_scores.append(read_excel(os.path.join(root, item)))\n return p_scores\n\n\ndef read_excel(path):\n try:\n workbook = xlrd.open_workbook(path, encoding_override='utf-8')\n target_sheet = workbook.sheets()[0]\n name = target_sheet.cell(1, 2).value\n emp_no = target_sheet.cell(1, 4).value\n line_manager = target_sheet.cell(2, 6).value\n employee = Employee(name, emp_no, line_manager)\n overall_self_assessment = \"%.3f\" % target_sheet.cell(11, 7).value\n overall_line_mgr_assessment = \"%.3f\" % target_sheet.cell(11, 8).value\n p_score = PScore(employee=employee, overall_self_assessment=overall_self_assessment,\n overall_line_mgr_assessment=overall_line_mgr_assessment)\n\n for row_index in range(4, 11):\n eval_item_detail = EvalItemDetail()\n eval_item_detail.item = target_sheet.cell(row_index, 1).value\n if not eval_item_detail.item.strip():\n break\n eval_item_detail.weight = \"%.2f\" % target_sheet.cell(row_index, 3).value\n eval_item_detail.self_desc = target_sheet.cell(row_index, 4).value\n eval_item_detail.self_assessment = \"%.2f\" % target_sheet.cell(row_index, 7).value\n eval_item_detail.line_mgr_assessment = \"%.2f\" % target_sheet.cell(row_index, 8).value\n p_score.add_eval_item(eval_item_detail)\n return p_score\n\n except Exception as e:\n print(\"error \", e)\n\n\ndef write_stats_to_excel(path, p_scores):\n wb = xlsxwriter.Workbook(path)\n work_sheet = wb.add_worksheet()\n headers = [\"名字\", \"主管\", \"自评\", \"主管评\"]\n for index, header in enumerate(headers):\n work_sheet.write(0, index, header)\n\n for index, p_score in enumerate(p_scores):\n if p_score:\n work_sheet.write(index, 0, p_score.employee.name)\n work_sheet.write(index, 1, p_score.employee.line_manager)\n work_sheet.write(index, 2, p_score.overall_self_assessment)\n work_sheet.write(index, 3, p_score.overall_line_mgr_assessment)\n\n wb.close()\n\n\nclass Employee:\n def __init__(self, name, emp_no, line_manager):\n self.name = name\n self.emp_no = emp_no\n self.line_manager = line_manager\n\n def __repr__(self, *args, **kwargs):\n return (\n \"{name}-{emp_no}-{line_manager}\".\n format(name=self.name, emp_no=self.emp_no, line_manager=self.line_manager))\n\n\nclass PScore:\n def __init__(self, employee, overall_self_assessment, overall_line_mgr_assessment):\n self.employee = employee\n self.overall_self_assessment = overall_self_assessment\n self.overall_line_mgr_assessment = overall_line_mgr_assessment\n self.eval_item_details = []\n\n def add_eval_item(self, eval_item_detail):\n self.eval_item_details.append(eval_item_detail)\n return self\n\n\nclass EvalItemDetail:\n def __init__(self):\n self.item = ''\n self.weight = ''\n self.self_desc = ''\n self.self_assessment = ''\n self.line_mgr_assessment = ''\n\n def __repr__(self):\n return \"_\".join([self.item, self.weight, self.self_desc, self.self_assessment, self.line_mgr_assessment])\n\n\nif __name__ == '__main__':\n write_stats_to_excel(\"all.xlsx\", load_p_data())\n", "sub_path": "get_shit_done/real_useful_project/p-view/p_stats.py", "file_name": "p_stats.py", "file_ext": "py", "file_size_in_byte": 3520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 20, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "332429417", "text": "from __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport argparse\nimport functools\n\n# On Python3, StringIO can come from standard library io:\nfrom ruamel.yaml.compat import StringIO\nimport ruamel.yaml\n\n\ndef repository_specific_formatting(empty_line_top_level, yaml_string):\n '''Transform function to ruamel.yaml's dump function. Makes sure there are\n only empty lines inbetween different top level keys (if empty_line_top_level\n is True, otherwise no empty lines).\n '''\n\n yaml_string = re.sub(r'\\n+', r'\\n', yaml_string) # Remove all empty lines\n\n if empty_line_top_level:\n yaml_string = re.sub( # Add one empty line between each package\n r'(\\n[^\\#][^\\n]*)\\n([^\\s])', r'\\1\\n\\n\\2', yaml_string\n )\n\n return yaml_string\n\n\ndef is_repository(config):\n '''Returns `False` if the configuration corresponds to a Komodo release\n (all config elements below top level key are strings). Returns `True` if\n it corresponds to a _repository_ (all config elements below top level key are\n themselves dictionaries).\n\n Raises ValueError if inconsistent throughout the config.\n '''\n\n # For Python2+3 compatibility. On Python3-only, use isinstance(x, str)\n # instead of isinstance(x, basestring).\n try:\n basestring\n except NameError:\n basestring = str # No basestring on Python3\n\n if all([isinstance(package, basestring) for package in config.values()]):\n return False\n elif all([isinstance(package, ruamel.yaml.comments.CommentedMap)\n for package in config.values()]):\n return True\n\n raise ValueError(\n 'Inconsistent configuration file. '\n 'Not able to detect if it is a release or repository.'\n )\n\n\ndef prettier(yaml_input_string):\n '''Takes in a string corresponding to a YAML Komodo configuration, and returns\n the corresponding prettified YAML string.'''\n\n ruamel_instance = ruamel.yaml.YAML()\n ruamel_instance.indent( # Komodo prefers two space indendation\n mapping=2, sequence=4, offset=2\n )\n ruamel_instance.width = 1000 # Avoid ruamel wrapping long\n\n try:\n config = ruamel_instance.load(yaml_input_string)\n except ruamel.yaml.constructor.DuplicateKeyError as e:\n raise SystemExit(str(e))\n\n komodo_repository = is_repository(config)\n\n # On Python3.6+, sorted_config can just be an\n # ordinary dict as insertion order is then preserved.\n sorted_config = ruamel.yaml.comments.CommentedMap()\n for package in sorted(config, key=str.lower):\n sorted_config[package] = config[package]\n\n setattr(sorted_config, ruamel.yaml.comments.comment_attrib, config.ca)\n\n yaml_output = StringIO()\n ruamel_instance.dump(\n sorted_config,\n yaml_output,\n transform=functools.partial(repository_specific_formatting, komodo_repository)\n )\n\n if sys.version_info < (3, 0):\n # Need to encode the byte-string on Python2\n return yaml_output.getvalue().encode('utf-8')\n\n return yaml_output.getvalue()\n\n\ndef prettified_yaml(filepath, check_only=True):\n '''Returns `True` if the file is already \"prettified\", `False` otherwise.\n If `check_only` is False, the input file will be \"prettified\" in place if necessary.\n '''\n\n print('Checking {}... 
'.format(filepath), end='')\n\n with open(filepath, 'r') as fh:\n yaml_input_string = fh.read()\n\n yaml_prettified_string = prettier(yaml_input_string)\n\n if yaml_prettified_string != yaml_input_string:\n print('{} reformatted!'.format('would be' if check_only else ''))\n if not check_only:\n with open(filepath, 'w') as fh:\n fh.write(yaml_prettified_string)\n return False\n\n print('looking good!')\n return True\n\n\ndef prettier_main():\n '''Main function doing user argument parsing and calling necessary functions.\n '''\n\n parser = argparse.ArgumentParser(\n description=(\n 'Check and/or format the Komodo configuration files. '\n 'Takes in any number of yml files, which could be e.g. the main '\n 'Komodo repository and an arbitrary number of releases. '\n 'Throws a hard error if the same package is defined multiple times.'\n )\n )\n parser.add_argument(\n 'files',\n type=lambda arg: arg if os.path.isfile(arg) \\\n else parser.error('{} is not a file'.format(arg)),\n nargs='+',\n help='One or more files to format/check',\n )\n parser.add_argument(\n '--check',\n action='store_true',\n help=(\n 'Do not write the files back, just return the status. '\n 'Return code 0 means nothing would change. '\n 'Return code 1 means some files would be reformatted.'\n ),\n )\n\n args = parser.parse_args()\n\n sys.exit(0) if all(\n [prettified_yaml(filename, args.check) for filename in args.files]\n ) or not args.check else sys.exit(1)\n\n\nif __name__ == '__main__':\n prettier_main()\n", "sub_path": "komodo/prettier.py", "file_name": "prettier.py", "file_ext": "py", "file_size_in_byte": 5009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 48, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 48, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml.YAML", "line_number": 62, "usage_type": "call"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 62, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 70, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 70, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml.comments.CommentedMap", "line_number": 77, "usage_type": "call"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 77, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 77, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.yaml", "line_number": 81, "usage_type": "attribute"}, {"api_name": "ruamel.yaml.compat", "line_number": 81, "usage_type": "name"}, {"api_name": "ruamel.yaml.compat.StringIO", "line_number": 83, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 90, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 151, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 153, "usage_type": "call"}]}
+{"seq_id": "227350987", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import neighbors\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\niris = load_iris()\nX = iris.data[:, :2] # Choosing only the first two input-features\ny = iris.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n\n# Visualizing the training data\nX_class0 = np.asmatrix(\n [X_train[i] for i in range(len(X_train)) if y_train[i] == 0]) # Picking only the first two classes\nY_class0 = np.zeros((X_class0.shape[0]), dtype=np.int)\nX_class1 = np.asmatrix([X_train[i] for i in range(len(X_train)) if y_train[i] == 1])\nY_class1 = np.ones((X_class1.shape[0]), dtype=np.int)\nX_class2 = np.asmatrix([X_train[i] for i in range(len(X_train)) if y_train[i] == 2])\nY_class2 = np.full((X_class2.shape[0]), fill_value=2, dtype=np.int)\n\nX_class0 = np.array(X_class0)\nX_class1 = np.array(X_class1)\nX_class2 = np.array(X_class2)\nplt.scatter(X_class0[:, 0], X_class0[:, 1], color='red')\nplt.scatter(X_class1[:, 0], X_class1[:, 1], color='blue')\nplt.scatter(X_class2[:, 0], X_class2[:, 1], color='green')\nplt.xlabel('sepal length')\nplt.ylabel('sepal width')\nplt.legend(['class 0', 'class 1', 'class 2'])\nplt.title('Fig 3: Visualization of training data')\nplt.show()\n\nmodel = neighbors.KNeighborsClassifier(n_neighbors=5) # K = 5\nmodel.fit(X_train, y_train)\n\nquery_point = np.array([5.9, 2.9])\ntrue_class_of_query_point = 1\npredicted_class_for_query_point = model.predict([query_point])\nprint(\"Query point: {}\".format(query_point))\nprint(\"True class of query point: {}\".format(true_class_of_query_point))\n\nneighbors_object = neighbors.NearestNeighbors(n_neighbors=5)\nneighbors_object.fit(X_train)\ndistances_of_nearest_neighbors, indices_of_nearest_neighbors_of_query_point = neighbors_object.kneighbors([query_point])\nnearest_neighbors_of_query_point = X_train[indices_of_nearest_neighbors_of_query_point[0]]\nprint(\"The query point is: {}\\n\".format(query_point))\nprint(\"The nearest neighbors of the query point are:\\n {}\\n\".format(nearest_neighbors_of_query_point))\nprint(\"The classes of the nearest neighbors are: {}\\n\".format(y_train[indices_of_nearest_neighbors_of_query_point[0]]))\nprint(\"Predicted class for query point: {}\".format(predicted_class_for_query_point[0]))\n\nplt.scatter(X_class0[:, 0], X_class0[:, 1], color='red')\nplt.scatter(X_class1[:, 0], X_class1[:, 1], color='blue')\nplt.scatter(X_class2[:, 0], X_class2[:, 1], color='green')\nplt.scatter(query_point[0], query_point[1], marker='^', s=75, color='black')\nplt.scatter(nearest_neighbors_of_query_point[:, 0], nearest_neighbors_of_query_point[:, 1], marker='s', s=150,\n color='yellow', alpha=0.30)\nplt.xlabel('sepal length')\nplt.ylabel('sepal width')\nplt.legend(['class 0', 'class 1', 'class 2'])\nplt.title('Fig 3: Working of the K-NN classification algorithm')\nplt.show()\n\ntest_set_predictions = [model.predict(X_test[i].reshape((1, len(X_test[i]))))[0] for i in range(X_test.shape[0])]\ntest_misclassification_percentage = 0\nfor i in range(len(test_set_predictions)):\n if test_set_predictions[i] != y_test[i]:\n test_misclassification_percentage += 1\ntest_misclassification_percentage *= 100 / len(y_test)\n\nprint(\"Evaluating K-NN classifier:\")\nprint('test misclassification percentage = {}%'.format(test_misclassification_percentage))\n", "sub_path": "KNearestNeighbours.py", "file_name": "KNearestNeighbours.py", "file_ext": "py", 
"file_size_in_byte": 3360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 7, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.asmatrix", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.asmatrix", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.asmatrix", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", 
"line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]}
+{"seq_id": "141477682", "text": "import requests\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\nimport os\n\nclass VirustotalSubdomainFinder:\n def grab_virustotal_subdomains_for_domain(domain):\n\n try:\n key = os.environ['VIRUSTOTAL_API_KEY']\n api_url = f'https://www.virustotal.com/vtapi/v2/domain/report?apikey={key}&domain={domain}'\n api_response = requests.get(api_url, verify=False)\n subdomains = api_response.json()['subdomains']\n return subdomains\n except Exception as e:\n print(e)\n", "sub_path": "assetmon/managers/tools/virustotal.py", "file_name": "virustotal.py", "file_ext": "py", "file_size_in_byte": 571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 3, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "366305585", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nif __name__ == \"__main__\":\n\tfrom models.accountLinkedin import *\n\tfrom models.accountCompany import *\n\tfrom utils.utils import *\n\tfrom seleniumClass.managerSelenium import SeleniumManager\n\tfrom seleniumClass.seleniumClientLinkedin import ClientLinkedin\n\tfrom settings.settingsLinkedin import *\nelse:\n\tfrom .models.accountLinkedin import *\n\tfrom .models.accountCompany import *\n\tfrom .utils.utils import *\n\tfrom .seleniumClass.managerSelenium import SeleniumManager\n\tfrom .seleniumClass.seleniumClientLinkedin import ClientLinkedin\n\tfrom .settings.settingsLinkedin import *\n\nfrom datetime import datetime\nimport sys\nimport argparse\nimport time\nimport os\nimport bs4\nimport platform\n\ndef standardUrl(url, company=False):\n\n\tif company:\n\t\ttab = url.split(\"linkedin.com\")\n\t\treturn \"https://www.linkedin.com\"+tab[1]\n\telse:\n\t\tif not \"linkedin.com/in\" in url:\n\t\t\treturn None\n\n\t\ttab = url.split(\"linkedin.com/in\")\n\n\t\t\"\"\" we ignore the string after the /name/ \"\"\"\n\t\ttab2 = tab[1].split(\"/\")\n\n\t\treturn \"https://www.linkedin.com/in/\"+tab2[1]\n\n#class permettant d'effectuer les recherches de personnes / scrapping d'information via Selenium sur Linkedin\nclass SearcherLinkedin:\n\n\tdef __init__(self, manager):\n\t\tself.manager = manager\n\t\tliclient = ClientLinkedin(self.manager.driver, search_keys)\n\t\tself.manager.connection(liclient)\n\n\t\"\"\" Effectuer le scrapping sur une page de recherche, récupérant les liens, sur Linkedin et va a la page suivant si possible\n\t\tUtiliez par la méthode findLinkedinsKeyWord(self, keywords)\n\t\"\"\"\n\tdef findLinkedinsScrapping(self):\n\n\t\t#Chargement de la page /!\\ \n\t\ttime.sleep(2)\n\t\t\n\t\t#On scroll histoire que la page soit charger pour le scrapping (sinon rique de manquer des elements)\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight/2);\")\n\t\ttime.sleep(1)\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\ttime.sleep(1)\n\n\t\thtml=self.manager.driver.page_source\n\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\")\n\n\t\tsame=soup.find_all('a', class_='search-result__result-link')\n\n\t\tliste = []\n\t\ta=0\n\t\tfor elem in same:\n\t\t\tliste.append('https://www.linkedin.com'+elem.get('href'))\n\n\t\tnext_page = soup.find_all('ol', class_='results-paginator')\n\t\tfor elem in next_page:\n\t\t\tsuivant=elem.find_all('button', class_='next')\n\t\t\tif (len(suivant)==1):\n\t\t\t\t#dans le cas ou il croit avoir trouvé un button ... 
ouais ça arrive si la connexion est trop lente\n\t\t\t\ttry:\n\t\t\t\t\tself.manager.driver.find_element_by_css_selector('button.next').click()\n\t\t\t\t\tliste = liste + self.findLinkedinsScrapping()\n\t\t\t\texcept:\n\t\t\t\t\tbreak\n\t\treturn set(liste)\n\n\t\"\"\" effectue une recherche avec des mots clés deja composé par exemples, frank candido president \"\"\"\n\tdef findLinkedinsKeyWord(self, keywords):\n\t\t\"\"\" fait une recherche avec les mots clefs, replace les espaces par un %20 pour qu'ils fonctionnent dans l'url \"\"\"\n\t\tkey=\"keywords=\"\n\t\tkeywords=keywords.strip()\n\t\tprofile_link=\"https://www.linkedin.com/search/results/people/?%s%s\" % (key, keywords.replace(' ','%20'))\n\n\t\tself.manager.get(profile_link, 3)\n\t\treturn self.findLinkedinsScrapping()\n\n\t\"\"\" effectue une recherche avec des mots clés dans une liste par exemples, liste={frank, candido, president} \"\"\"\n\tdef findLinkedinsByKeywordsByList(self, liste):\n\t\tstr_keywords = \"\"\n\t\tfor val in liste:\n\t\t\tstr_keywords = str_keywords + val + \" \"\n\t\treturn self.findLinkedinsKeyWord(str_keywords)\n\n\t\"\"\" effectue une recherche linkedin avec des informtions précise ecole et entreprise son optionnel \"\"\"\n\tdef findLinkedins(self, nom, prenom, ecole=None, entreprise=None):\n\t\t\"\"\"\n\t\t\tUsage :\n\t\t\tecole=\"str\", entreprise=\"str\" qui sont des paramètres optionnel\n\t\t\"\"\"\n\t\trecherche_nom= \"lastName=\"\n\t\trecherche_prenom = \"firstName=\"\n\t\tprofile_link=\"https://www.linkedin.com/search/results/people/?\"+recherche_nom+nom+\"&\"+recherche_prenom+prenom\n\n\t\tif ecole is not None:\n\t\t\trecherche_ecole=\"school=%s\" % ecole\n\t\t\tprofile_link+= \"&\"+recherche_ecole\n\t\tif entreprise is not None:\n\t\t\trecherche_entreprise=\"company=%s\" % entreprise\n\t\t\tprofile_link+= \"&\"+recherche_entreprise\n\n\n\t\tself.manager.get(profile_link, 3)\n\n\t\treturn self.findLinkedinsScrapping()\n\n\tdef findLinkedin(self, nom, prenom, url, file_tmp):\n\n\t\tcompte = CompteLinkedin(nom, prenom, url)\n\n\t\t\"\"\" pause 0 car on doit defiler vers le bas avant de faire la pause\"\"\"\n\t\tself.manager.get(url, 0)\n\t\t\n\t\t#on charge le haut de la page\n\t\ttime.sleep(3)\n\t\t#on scrolle vers le bas pour faire un chargement des centres d'interet\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t# on charge le bas de la page\n\t\ttime.sleep(3)\n\n\t\thtml=self.manager.driver.page_source\n\n\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\") #specify parser or it will auto-select for you\n\n\t\t#Education\n\t\tvaleurs = soup.find_all('section', class_='education-section')\n\t\tif file_tmp != \"\":\n\t\t\tfile_tmp.write(\"-------------------------Education/Etude-------------------------\\n\")\n\t\tif(len(valeurs)==0):\n\t\t\tif file_tmp != \"\":\n\t\t\t\tfile_tmp.write('Empty\\n')\n\t\telse:\n\t\t\tres=\"\"\n\t\t\tfor elem in valeurs:\n\t\t\t\telem_valeurs = elem.find_all('li')\n\t\t\t\tfor e in elem_valeurs:\n\t\t\t\t\tif(e.get_text() != '') :\n\t\t\t\t\t\ttmp = formater(e.get_text())\n\t\t\t\t\t\tcompte.addEtude(d(tmp))\n\t\t\t\t\t\tres = res + '\\n\\n' + tmp\n\t\t\tif file_tmp != \"\":\n\t\t\t\tecriturePython2_Python3(file_tmp, res)\n\t\t\t\tfile_tmp.write('\\n\\n')\n\n\t\t#Favoris\n\t\tif file_tmp != \"\":\n\t\t\tfile_tmp.write(\"\\n-------------------------Favoris-------------------------\\n\")\n\t\tvaleurs = soup.find_all('li', class_='pv-interest-entity')\n\t\tfor elem in valeurs:\n\t\t\tif(elem.get_text()!= 
''):\n\t\t\t\ttmp = formater(elem.get_text())\n\t\t\t\tcompte.addFavori(d(tmp))\n\t\t\t\tif file_tmp != \"\":\n\t\t\t\t\tecriturePython2_Python3(file_tmp, tmp)\n\t\t\t\t\tfile_tmp.write('\\n\\n')\n\n\t\t# Recuperation en dur des experiences\n\t\texperiences = []\n\t\tvaleurs = soup.find_all('section', class_='experience-section')\n\n\t\tif file_tmp != \"\":\n\t\t\tfile_tmp.write(\"\\n-------------------------Experiences-------------------------\\n\")\n\t\tif(len(valeurs)==0):\n\t\t\tif file_tmp != \"\":\n\t\t\t\tfile_tmp.write('Empty\\n')\n\t\telse:\n\t\t\t\"\"\"depuis un tableau de type soup, On récupère la liste de tag li qu'on formatte pour l'affichage \n\t\t\tCette fonction est utilisé pour la partie education et expérience\"\"\"\n\t\t\tres=\"\"\n\t\t\tfor elem in valeurs:\n\t\t\t\telem_valeurs = elem.find_all('li')\n\t\t\t\tfor e in elem_valeurs:\n\t\t\t\t\tif(e.get_text() != '') :\n\t\t\t\t\t\ttmp = formater(e.get_text())\n\t\t\t\t\t\texperiences.append(d(tmp))\n\t\t\t\t\t\tres = res + '\\n\\n' + tmp\n\t\t\tif file_tmp != \"\":\n\t\t\t\tecriturePython2_Python3(file_tmp, res)\n\t\t\t\tfile_tmp.write('\\n\\n')\n\n\t\t#Recuperation des logos d'entreprises et des urls d'entreprises correspondantes\n\t\turlsExperiences = []\n\t\tsoupImageEntreprise = soup.select('.pv-profile-section.experience-section.ember-view a')\n\t\tfor elem in soupImageEntreprise:\n\t\t\tif elem.get_text() != '':\n\n\t\t\t\t#By default address url empty\n\t\t\t\tif \"company\" in elem.get('href'):\n\t\t\t\t\turlsExperiences.append(\"https://www.linkedin.com\"+elem.get('href'))\n\t\t\t\telse:\n\t\t\t\t\turlsExperiences.append(\"\")\n\n\t\t#Parcourt des experiences en dur et des urls d'entreprises lier\n\t\tnumExp = 0\n\t\tfor experience in experiences:\n\n\t\t\t#Default variable\n\t\t\tnom = \"\"\n\t\t\tnomE = \"\"\n\t\t\tdate = \"\"\n\t\t\tlocation = \"\"\n\t\t\tdescription = \"\"\n\t\t\tactif = True\n\t\t\tdomaine = \"\"\n\t\t\tdescriptionE = \"\"\n\n\t\t\tstr_tab=experience.split(\"\\n\")\n\t\t\tligne = 0\n\t\t\tfor strExp in str_tab :\n\t\t\t\tstrDecode = d(strExp)\n\t\t\t\tstrExpLow = strDecode.lower()\n\t\t\t\tstrExpLow_tab=strExpLow.split(\" \")\n\n\t\t\t\t#print(strDecode)\n\t\t\t\t#Instanciation of the experience job: (first ligne not empty)\n\t\t\t\tif ligne == 0:\n\t\t\t\t\tnom = strDecode\n\n\t\t\t\t#Instanciation of the date\n\t\t\t\tif \"dates\" == strExpLow_tab[0]:\n\t\t\t\t\tdate = strDecode[16:]\n\t\t\t\t\tactif = False\n\t\t\t\t\tfor var in [\"aujourd\", \"present\", \"now\", \"today\"]:\n\t\t\t\t\t\tif var in strExpLow:\n\t\t\t\t\t\t\tactif = True\n\t\n\t\t\t\t#Instanciation of the name of the entreprise\n\t\t\t\tif \"company\" == strExpLow_tab[0]:\n\t\t\t\t\tnomE = strDecode[13:]\n\n\t\t\t\t#Instanciation of the geolocalisation\n\t\t\t\tif \"location\" == strExpLow_tab[0]:\n\t\t\t\t\tlocation = strDecode.replace(\"Lieu \", \"\")\n\n\t\t\t\t#Instanciation of the description\n\t\t\t\tif (ligne>0) & (not strExpLow_tab[0] in ['duree', 'dates', 'nom', 'lieu']):\n\t\t\t\t\tdescription = strDecode\n\t\t\t\t\n\t\t\t\t# ++ si la ligne n'est pas vide\n\t\t\t\tif not strExp==\"\":\n\t\t\t\t\tligne = ligne + 1\n\n\t\t\t#Si un logo entreprise existe.\n\t\t\tif not urlsExperiences[numExp] == \"\":\n\t\t\t\t# wait for page load=3\n\t\t\t\tself.manager.get(urlsExperiences[numExp], 3)\n\t\t\t\thtml=self.manager.driver.page_source\n\t\t\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\") #specify parser or it will auto-select for you\n\t\t\t\tdivnom = soup.select('.org-top-card-module__name')\n\t\t\t\tdivdomaine = 
soup.select('.company-industries.org-top-card-module__dot-separated-list')\n\t\t\t\tdivlocation = soup.select('.org-top-card-module__location')\n\t\t\t\tdivdescription = soup.select('.org-about-us-organization-description p')\n\n\t\t\t\t#On preferera le nom et la localisation donner sur la page de l'entreprise si elle existe.\n\t\t\t\tfor elem in divnom:\n\t\t\t\t\tnomE = d(elem.get_text().strip(\"\\n \\r\"))\n\t\t\t\tfor elem in divlocation:\n\t\t\t\t\tlocation = d(elem.get_text().strip(\"\\n \\r\"))\n\t\t\t\tfor elem in divdomaine:\n\t\t\t\t\tdomaine = d(elem.get_text().strip(\"\\n \\r\"))\n\t\t\t\tfor elem in divdescription:\n\t\t\t\t\tdescriptionE = d(elem.get_text().strip(\"\\n \\r\"))\n\n\t\t\tcompte.addExperience(nom, date, location, description, actif, urlsExperiences[numExp], nomE, descriptionE, domaine)\n\n\t\t\t#++\n\t\t\tnumExp = numExp + 1\n\n\t\treturn compte\n\n\tdef findLinkedinCompany(self, link):\n\t\tself.manager.get(link, 0)\n\t\t#wait top page loading\n\t\ttime.sleep(3)\n\t\t#scroll down\n\t\tself.manager.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t\t# wait bottom page loading\n\t\ttime.sleep(3)\n\t\taccountCompanyLinkedin = AccountCompany(\"\",link)\n\t\thtml=self.manager.driver.page_source\n\t\tsoup=bs4.BeautifulSoup(html, \"html.parser\") #specify parser or it will auto-select for you\n\t\tdivnom = soup.select('.org-top-card-module__name')\n\t\tdivdomaine = soup.select('.company-industries.org-top-card-module__dot-separated-list')\n\t\tdivlocation = soup.select('.org-top-card-module__location')\n\t\tdivdescription = soup.select('.org-about-us-organization-description p')\n\n\t\t#On preferera le nom et la localisation donner sur la page de l'entreprise si elle existe.\n\t\tfor elem in divnom:\n\t\t\taccountCompanyLinkedin.nomComplet = d(elem.get_text().strip(\"\\n \\r\"))\n\t\tfor elem in divlocation:\n\t\t\taccountCompanyLinkedin.position = d(elem.get_text().strip(\"\\n \\r\"))\n\t\tfor elem in divdomaine:\n\t\t\taccountCompanyLinkedin.domaine = d(elem.get_text().strip(\"\\n \\r\"))\n\t\tfor elem in divdescription:\n\t\t\taccountCompanyLinkedin.description = d(elem.get_text().strip(\"\\n \\r\"))\n\n\t\treturn accountCompanyLinkedin\n\nif __name__ == '__main__':\n\tmanager = SeleniumManager(3)\n\tsearch = SearcherLinkedin(manager)\n\tliste = search.findLinkedinsKeyWord(\"frank candido president\")\n\tliste = list(liste)\n\t#liste = search.findLinkedins(\"candido\", \"frank\", entreprise=\"nuran\")\n\t#test pour cas plusieurs page = nbr résultat = 13\n\t#liste = search.findLinkedins(\"Legros\", \"camille\")\n\n\tfile_tmp = \"\"\n\tname_date_file = datetime.now().strftime('%H%M%d%m%Y')\n\tif sys.version_info >= (3, 0):\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_recherche'+name_date_file+'.log', 'w+', encoding=\"utf8\")\n\telse:\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_recherche'+name_date_file+'.log', 'w+')\n\tfor val in liste:\n\t\tprint(val)\n\t\tecriturePython2_Python3(file_tmp, val)\n\t\tfile_tmp.write('\\n')\n\tfile_tmp.close()\n\n\tfile_tmp = \"\"\n\tif sys.version_info >= (3, 0):\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_info'+name_date_file+'.log', 'w+', encoding=\"utf8\")\n\telse:\n\t\t#cas ou c'est en python2, il faudra dire que l'encodage sera fait en utf8 lors de l'écriture dans le fichier via str.encode(utf8) (qui fonctionne pas en python3 sinon c'est pas drole)\n\t\tfile_tmp=open('libraries/SNScrapping/log/sLinkedin_py_info'+name_date_file+'.log', 'w+')\n\n\tif len(liste) 
> 0 :\n\t\tcompte = search.findLinkedin(\"candido\", \"frank\", liste[0], file_tmp)\n\t\tcompte.homonymes = liste[1:]\n\t\tfor experience in compte.experiences:\n\t\t\tif platform.system() == \"Windows\":\n\t\t\t\tfile_tmp.write('\\n\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"date:\"+experience.date+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"description:\"+experience.description+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"urlEntreprise:\"+experience.urlEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"nomExperience:\"+experience.nomExperience+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"nomEntreprise:\"+experience.nomEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"geolocalisation:\"+experience.geolocalisation+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"descriptionE:\"+experience.descriptionEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"domaine:\"+experience.domaineEntreprise+'\\n')\n\t\t\t\tecriturePython2_Python3(file_tmp,\"expActif? %s \\n\" % experience.actif)\n\n\t\t\telse:\n\t\t\t\tprint(\"date:\",experience.date)\n\t\t\t\tprint(\"description\", experience.description)\n\t\t\t\tprint(\"urlEntreprise\", experience.urlEntreprise)\n\t\t\t\tprint(\"nomExperience\", experience.nomExperience)\n\t\t\t\tprint(\"nomEntreprise\", experience.nomEntreprise)\n\t\t\t\tprint(\"geolocalisation\", experience.geolocalisation)\n\t\t\t\tprint(\"descriptionE\", experience.descriptionEntreprise)\n\t\t\t\tprint(\"domaine\", experience.domaineEntreprise)\n\t\t\t\tprint(\"expActif?\", experience.actif)\n\tfile_tmp.close()\n\tmanager.driver_quit()\n", "sub_path": "project/libraries/SNScrapping/sLinkedin.py", "file_name": "sLinkedin.py", "file_ext": "py", "file_size_in_byte": 13467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "seleniumClass.seleniumClientLinkedin.ClientLinkedin", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 145, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 270, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 296, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 300, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 303, "usage_type": "call"}, {"api_name": "seleniumClass.managerSelenium.SeleniumManager", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 331, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 331, "usage_type": "name"}, {"api_name": "sys.version_info", "line_number": 332, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 343, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 353, "usage_type": "call"}]}
+{"seq_id": "68218009", "text": "__author__ = 'ecrisostomo'\n\nimport binascii\nimport hmac\nimport hashlib\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\nfrom stormpath.util import is_default_port, encode_url, str_query_string\n\nclass Sauthc1Signer:\n\n HOST_HEADER = \"Host\"\n AUTHORIZATION_HEADER = \"Authorization\"\n STORMPATH_DATE_HEADER = \"X-Stormpath-Date\"\n ID_TERMINATOR = \"sauthc1_request\"\n ALGORITHM = \"HMAC-SHA-256\"\n AUTHENTICATION_SCHEME = \"SAuthc1\"\n SAUTHC1_ID = \"sauthc1Id\"\n SAUTHC1_SIGNED_HEADERS = \"sauthc1SignedHeaders\"\n SAUTHC1_SIGNATURE = \"sauthc1Signature\"\n DATE_FORMAT = \"%Y%m%d\"\n TIMESTAMP_FORMAT = \"%Y%m%dT%H%M%SZ\"\n NL = \"\\n\"\n\n def sign_request(self, request, api_key):\n\n time = datetime.utcnow()\n time_stamp = time.strftime(self.TIMESTAMP_FORMAT)\n date_stamp = time.strftime(self.DATE_FORMAT)\n\n nonce = str(uuid4())\n\n parsed_url = urlparse(request.href)\n\n # SAuthc1 requires that we sign the Host header so we\n # have to have it in the request by the time we sign.\n host_header = parsed_url.hostname\n\n if not is_default_port(parsed_url):\n host_header = parsed_url.netloc\n\n request.http_headers[self.HOST_HEADER] = host_header\n\n request.http_headers[self.STORMPATH_DATE_HEADER] = time_stamp\n\n method = request.http_method\n canonical_resource_path = self._canonicalize_resource_path_(parsed_url.path)\n canonical_query_string = self._canonicalize_query_string_(request)\n canonical_headers_string = self._canonicalize_headers_(request)\n signed_headers_string = self._get_signed_headers_(request)\n request_payload_hash_hex = self._hash_hex_(self._get_request_payload_(request))\n\n canonical_request = ''.join((method, self.NL,\n canonical_resource_path, self.NL,\n canonical_query_string, self.NL,\n canonical_headers_string, self.NL,\n signed_headers_string, self.NL,\n request_payload_hash_hex))\n\n id = ''.join((api_key.id, \"/\", date_stamp, \"/\", nonce, \"/\", self.ID_TERMINATOR))\n\n canonical_request_hash_hex = self._hash_hex_(canonical_request)\n\n string_to_sign = ''.join((self.ALGORITHM, self.NL,\n time_stamp, self.NL,\n id, self.NL,\n canonical_request_hash_hex))\n\n # SAuthc1 uses a series of derived keys, formed by hashing different pieces of data\n k_secret = ''.join((self.AUTHENTICATION_SCHEME, api_key.secret))\n k_date = self._sign_(date_stamp, k_secret)\n k_nonce = self._sign_(nonce, k_date)\n k_signing = self._sign_(self.ID_TERMINATOR, k_nonce)\n\n signature = self._sign_(string_to_sign, k_signing)\n signature_hex = binascii.hexlify(signature).decode()\n\n authorization_header = ''.join((self.AUTHENTICATION_SCHEME, \" \",\n self._create_name_value_pair_(self.SAUTHC1_ID, id), \", \",\n self._create_name_value_pair_(self.SAUTHC1_SIGNED_HEADERS, signed_headers_string), \", \",\n self._create_name_value_pair_(self.SAUTHC1_SIGNATURE, signature_hex)))\n\n request.http_headers[self.AUTHORIZATION_HEADER] = authorization_header\n\n def _create_name_value_pair_(self, name, value):\n return ''.join((name, '=', value))\n\n def _sign_(self, data, key):\n\n try:\n byte_key = key.encode()\n except:\n byte_key = key\n\n return hmac.new(byte_key, data.encode(), hashlib.sha256).digest()\n\n def _hash_hex_(self, text):\n return hashlib.sha256(text.encode()).hexdigest()\n\n def _get_request_payload_(self, request):\n return self._get_request_payload_without_query_params_(request)\n\n def _get_request_payload_without_query_params_(self, request):\n\n result = 
''\n\n if request.body:\n result = request.body\n\n return result\n\n def _get_signed_headers_(self, request):\n\n sorted_headers = OrderedDict(sorted(request.http_headers.items()))\n\n result = ''\n\n for header in sorted_headers.copy().keys():\n\n if result:\n result += ';' + header\n else:\n result += header\n\n return result.lower()\n\n def _canonicalize_headers_(self, request):\n\n sorted_headers = OrderedDict(sorted(request.http_headers.items()))\n\n result = ''\n\n for key, value in sorted_headers.items():\n\n result += ''.join((str(key).lower(), ':', value))\n result += self.NL\n\n return result\n\n def _canonicalize_query_string_(self, request):\n return str_query_string(request.query_string)\n\n def _canonicalize_resource_path_(self, resource_path):\n\n if resource_path:\n return encode_url(resource_path)\n else:\n return '/'\n", "sub_path": "stormpath/http/authc.py", "file_name": "authc.py", "file_ext": "py", "file_size_in_byte": 5132, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 35, "usage_type": "call"}, {"api_name": "stormpath.util.is_default_port", "line_number": 41, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 78, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 97, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 97, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 116, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 131, "usage_type": "call"}, {"api_name": "stormpath.util.str_query_string", "line_number": 143, "usage_type": "call"}, {"api_name": "stormpath.util.encode_url", "line_number": 148, "usage_type": "call"}]}
+{"seq_id": "245220064", "text": "# encoding=utf-8\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\nimport scrapy\r\nfrom getpics.items import GetpicsItem, GetTitleItem\r\nfrom scrapy import log\r\nfrom scrapy.http import Request\r\n\r\n\r\nclass MySpider(scrapy.Spider):\r\n\tname = \"kook\"\r\n\tallowed_domains = [\"8264.com\"]\r\n\tstart_urls = [\r\n\t\t\"http://www.8264.com/list/871/\"\r\n\t]\r\n\tdownload_delay = 2\r\n\r\n\r\n\tdef parse(self, response):\r\n\t\titems = []\r\n\t\tfor sel in response.xpath('//div[@class=\"bbslistone\"]'):\r\n\t\t\titem = GetTitleItem()\r\n\t\t\t# item['image_urls'] = sel.xpath('a/img/@src').extract()[0]\r\n\t\t\t# item['image_paths'] = sel.xpath('div/a/text()').extract()[0]\r\n\t\t\t# 抓取title\r\n\t\t\ttitle = sel.xpath('div[@class=\"bbslistone_name\"]/a/text()').extract()[0]\r\n\t\t\tif len(title) == 0:\r\n\t\t\t\tlog.msg(\"fecth title failed\")\r\n\t\t\t\tcontinue\r\n\t\t\titem['title'] = title\r\n\t\t\ttitle_url = sel.xpath('a/@href').extract()[0]\r\n\t\t\t# 抓取title_url\r\n\t\t\tif len(title_url) == 0:\r\n\t\t\t\tlog.msg(\"fecth title_url failed\")\r\n\t\t\t\tcontinue\r\n\t\t\titem['title_url'] = title_url\r\n\t\t\titems.append(item)\r\n\t\t\tyield Request(item['title_url'], callback=self.parse_item, meta={'item':item})\r\n\r\n\t\t# for item in items:\r\n\t\t# \tyield Request(item['title_url'], callback=self.parse_item, meta={'item':item})\r\n\t\t# \tyield Request(item['title_url'], callback=self.parse_item)\r\n\t\t# \tyield item\r\n\r\n\r\n\r\n\r\n\tdef parse_item(self, response):\r\n\t\t# 获得由parse传来的item\r\n\t\t# item = response.meta['item']\r\n\t\t# 抓取image_urls\r\n\t\t# print response.body\r\n\r\n\r\n\t\tfor sel in response.xpath('//div[@class=\"t_fsz_new \"]'):\r\n\t\t\tfor img in sel.xpath('//img[@class=\"zoom\"]'):\r\n\r\n\t\t\t\titem =GetpicsItem()\r\n\t\t\t\timg_url = img.xpath('@file').extract()\r\n\t\t\t\tif len(img_url) == 0:\r\n\t\t\t\t\tlog.msg(\"there is no pics in the topic \")\r\n\t\t\t\t\treturn\r\n\t\t\t\ttitle = img.xpath('@title').extract()\r\n\t\t\t\tif len(title) == 0:\r\n\t\t\t\t\tlog.msg(\"the pic has no title\")\r\n\t\t\t\t\treturn\r\n\t\t\t\titem['image_urls'] = img_url\r\n\t\t\t\titem['title'] = title\r\n\t\t\t\tyield item\r\n\r\n\r\n\r\n\t\t# 抓取author\r\n\t\t# author", "sub_path": "getpics/getpics/spiders/MySpider.py", "file_name": "MySpider.py", "file_ext": "py", "file_size_in_byte": 1967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 4, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 11, "usage_type": "attribute"}, {"api_name": "getpics.items.GetTitleItem", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.log.msg", "line_number": 29, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 29, "usage_type": "name"}, {"api_name": "scrapy.log.msg", "line_number": 35, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 35, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 39, "usage_type": "call"}, {"api_name": "getpics.items.GetpicsItem", "line_number": 59, "usage_type": "call"}, {"api_name": "scrapy.log.msg", "line_number": 62, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 62, "usage_type": "name"}, {"api_name": "scrapy.log.msg", "line_number": 66, "usage_type": "call"}, {"api_name": "scrapy.log", "line_number": 66, "usage_type": "name"}]}
+{"seq_id": "521564570", "text": "'''\nReturns total price paid for individual rentals\n'''\nimport argparse\nimport json\nimport datetime\nimport math\nimport logging\n\nLOG_FILE = datetime.datetime.now().strftime(\"%Y-%m-%d\")+'_charges_calc.log'\nLOG_FORMAT = \"%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s\"\nFORMATTER = logging.Formatter(LOG_FORMAT)\n\nFILE_HANDLER = logging.FileHandler(LOG_FILE)\nFILE_HANDLER.setFormatter(FORMATTER)\n\nCONSOLE_HANDLER = logging.StreamHandler(LOG_FILE)\nCONSOLE_HANDLER.setFormatter(FORMATTER)\n\nLOGGER = logging.getLogger()\n\ndef parse_cmd_arguments():\n ''' Argumantes to pass '''\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='ouput JSON file', required=True)\n parser.add_argument('-d', '--debug', help='Debug level: 0:info, 1: error, 2: warn, 3: debug', default=0, required=False)\n return parser.parse_args()\n\n\ndef load_rentals_file(filename):\n ''' Loads rentals file data '''\n with open(filename) as file:\n try:\n LOGGER.info('loading JSON file %s', filename)\n return json.load(file)\n except ValueError as err:\n LOGGER.error('Failed to load JSON data: %s', err)\n exit(0)\n\n\ndef calculate_additional_fields(d_data):\n ''' Calculates additional fields '''\n LOGGER.info('Calculating additional fields')\n\n for value in d_data.values():\n try:\n rental_start = datetime.datetime.strptime(value['rental_start'], '%m/%d/%y')\n rental_end = datetime.datetime.strptime(value['rental_end'], '%m/%d/%y')\n except ValueError as err:\n LOGGER.warning(\"Failed to calculate time from %s or %s. ERROR: %s\", value['rental_start'], value['rental_end'], err)\n continue\n\n\n\n if rental_start >= rental_end:\n LOGGER.warning(\"Rental starts %s on or after ends %s in %s. 
Skip.\", rental_start, rental_end, value)\n continue\n\n try:\n value['total_days'] = (rental_end - rental_start).days\n value['total_price'] = value['total_days'] * value['price_per_day']\n value['sqrt_total_price'] = math.sqrt(value['total_price'])\n value['unit_cost'] = value['total_price'] / value['units_rented']\n except Exception as err:\n LOGGER.error('Failed Calculating additional fields: %s', err)\n exit(0)\n\n return d_data\n\ndef save_to_json(filename, data):\n ''' Save to json '''\n with open(filename, 'w') as file:\n json.dump(data, file)\n\nif __name__ == \"__main__\":\n ARGS = parse_cmd_arguments()\n DATA = load_rentals_file(ARGS.input)\n DATA1 = calculate_additional_fields(DATA)\n save_to_json(ARGS.output, DATA1)\n", "sub_path": "students/lguerrero/lesson02/assignment/src/charges_calc.py", "file_name": "charges_calc.py", "file_ext": "py", "file_size_in_byte": 2792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "datetime.datetime.now", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "json.load", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "426469045", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom args import *\nfrom model_parts import *\n\n\n'''\nModel head\n'''\nclass ModelDisigner(nn.Module):\n\tdef __init__(self):\n\t\tsuper(ModelDisigner, self).__init__()\n\t\tself.backbone = Backbone()\n\t\tself.score_branch = ScoreBranch()\n\t\tself.mask_branch = MaskBranch()\n\t\tself.up = nn.Upsample(scale_factor=2, mode='nearest')\n\t\tself.final = nn.Sequential(\n\t\t\tnn.Conv2d(1, NUM_CLASSES, kernel_size=1),\n\t\t\tnn.Sigmoid()\n\t\t\t)\n\n\tdef Correlation_func(self, s_f, t_f): # s_f-->search_feat, t_f-->target_feat\n\t\tt_f = t_f.reshape(-1, 1, t_f.size(2), t_f.size(3))\n\t\tout = s_f.reshape(1, -1, s_f.size(2), s_f.size(3)) # 1, b*ch, 32, 32\n\t\tout = F.conv2d(out, t_f, groups=t_f.size(0))\n\t\tout = out.reshape(-1, s_f.size(1), out.size(2), out.size(3))\n\t\treturn out\n\n\tdef Chiose_RoW(self, corr_feat, pos_list):\n\t\tcorr_feat = corr_feat.reshape(BATCH_SIZE, 17, 17, 256)\n\t\tj_tensors = torch.tensor([]).to(device)\n\t\tfor j in range(corr_feat.size(0)):\n\t\t\tj_tensor = corr_feat[j][pos_list[j, 0]][pos_list[j, 1]].unsqueeze(0)\n\t\t\tj_tensors = torch.cat([j_tensors, j_tensor], dim=0)\n\t\tj_tensors = j_tensors.unsqueeze(2).unsqueeze(3)\n\t\treturn j_tensors\n\n\n\tdef Choise_feat(self, feat, pos_list, x):\n\t\tfeat = feat.reshape(TIMESTEPS, BATCH_SIZE, feat.size(1), feat.size(2), feat.size(3))\n\t\tfeat = feat.permute(0, 1, 3, 4, 2)\n\n\t\ti_tensors = torch.tensor([]).to(device)\n\t\tfor i in range(feat.size(0)):\n\t\t\tj_tensors = torch.tensor([]).to(device)\n\t\t\tfor j in range(feat.size(1)):\n\t\t\t\tj_tensor = feat[i][j][x*pos_list[i][j][0]:x*pos_list[i][j][0]+x*16, x*pos_list[i][j][1]:x*pos_list[i][j][1]+x*16, :].unsqueeze(0)\n\t\t\t\tj_tensors = torch.cat([j_tensors, j_tensor], dim=0)\n\t\t\ti_tensor = j_tensors.unsqueeze(0)\n\t\t\ti_tensors = torch.cat([i_tensors, i_tensor], dim=0)\n\n\t\tfeat = i_tensors.permute(0, 1, 4, 2, 3)\n\t\tfeat = feat.reshape(TIMESTEPS*BATCH_SIZE, feat.size(2), feat.size(3), feat.size(4))\n\t\treturn feat\n\n\n\tdef forward(self, target, searchs):\n\t\t_, target_feat = self.backbone(target)\n\t\tsearch_cats, searchs_feat = self.backbone(searchs)\n\t\tcorr_feat = self.Correlation_func(searchs_feat, target_feat)\n\t\t##### Score Branch #####\n\t\tscore, pos_list = self.score_branch(corr_feat)\n\t\t# print(pos_list)\n\t\t##### Mask Branch #####\n\t\tmasks_feat = self.Chiose_RoW(corr_feat, pos_list)\n\t\tmask = self.mask_branch(masks_feat).reshape(BATCH_SIZE, 1, 64, 64)\n\t\tmask = self.up(mask)\n\t\tmask = self.final(mask)\n\t\treturn score, mask\n\n\nif __name__ == '__main__':\n\tmodel = ModelDisigner()\n\tmodel = model.to(device)\n\ttarget = torch.rand([BATCH_SIZE, 3, 128, 128]).to(device)\n\tsearchs = torch.rand([BATCH_SIZE, 3, 256, 256]).to(device)\n\tscore, mask = model(target, searchs)\n\tprint('score.shape: ', score.shape)\n\tprint('mask.shape: ', mask.shape)\n", "sub_path": "model_head_base.py", "file_name": "model_head_base.py", "file_ext": "py", "file_size_in_byte": 2745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Upsample", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 19, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "5323114", "text": "from collections import Counter\nimport h5py\nfrom itertools import combinations, permutations, product\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nfrom scipy.signal import decimate\nimport tensorflow as tf\n\n\nclass PairDataGeneratorRS(tf.keras.utils.Sequence):\n \"\"\"\n This class generates the pair data used to train/test a Siamese network,\n where train and test data come from same set of species.\n\n The pair data is restricted to come from different mice species to improve\n generality of the embedding learnt by the Siamese network.\n Pair from same classes is labeled with 0.\n Pair from different classes is labeled with 1.\n The set of sham/TBI mice must contain at least 2 unique species.\n Pair data in test set will not be in the train set.\n\n Currently, it only support num_classes = 4. Number of samples generated\n may be more than num_samples to ensure pair samples are distributed\n evenly among available pairs.\n\n Args:\n file_path: location of HDF5 files containing the EEG epochs\n file_template: template of filenamem, e.g., {}_BL5_ew32.h5\n sham_set: set of mice species with sham injury\n tbi_set: set of mice species with TBI\n purpose: purpose of generator - train or test\n batch_size: batch size\n num_classes: number of species classes\n num_samples: number of pair samples to generate\n regenerate: if True, new samples will be regenerated\n shuffle: if True, dataset are shuffled after each epoch\n decimate: decimation factor\n test_percent: percentage of pair samples used for test set\n overlap: if True, use overlapping epochs\n \"\"\"\n def __init__(self, file_path, file_template, sham_set, tbi_set,\n purpose='train', batch_size=32, num_classes=4,\n num_samples=1024, regenerate=False, shuffle=True, decimate=1,\n test_percent=20, overlap=True):\n self.file_path = file_path\n self.file_template = file_template\n self.decimate = decimate\n self.num_samples = num_samples\n assert purpose in ('train', 'test'),\\\n 'purpose must be either train or test'\n self.purpose = purpose\n assert test_percent >= 0 and test_percent <= 100,\\\n 'test_percent must be between 0 and 100'\n self.test_percent = test_percent\n\n # check that num_classes is set to 4\n assert num_classes == 4,\\\n 'Only num_classes = 4 is supported currently'\n self.num_classes = num_classes\n if num_classes == 4:\n self.stages = ['wake', 'sleep']\n self.num_class_combs = 10\n self.num_same_pairs = 4\n self.num_diff_pairs = 6\n elif num_classes == 6:\n self.stages = ['wake', 'nrem', 'rem']\n self.num_class_combs = 27\n self.num_same_pairs = 6\n self.num_diff_pairs = 21\n\n # check that sham_set and tbi_set contain at least 2 different species\n sham_set = list(Counter(sham_set))\n tbi_set = list(Counter(tbi_set))\n assert len(sham_set) > 1,\\\n \"Sham set must contain at least 2 unique species\"\n assert len(tbi_set) > 1,\\\n \"TBI set must contain at least 2 unique species\"\n self.sham_set = sham_set\n self.tbi_set = tbi_set\n\n # read from existing index file for generated samples\n # if regenerate = False; generate new index file if it does not exist\n if overlap:\n self.out_file = file_template[:-3].format('pairdata') +\\\n '_{}_{}_{}_{}.h5'.format(num_classes, batch_size, num_samples,\n test_percent)\n else:\n self.out_file = file_template[:-3].format('pairdata_novl') +\\\n '_{}_{}_{}_{}.h5'.format(num_classes, batch_size, num_samples,\n test_percent)\n if not os.path.exists(self.out_file) or regenerate:\n self._generate_labeled_pairs()\n\n # set the 
generator to be either train or test data generator\n num_test_samples = int(np.round(self.test_percent *\n self.num_samples / 100))\n if self.purpose == 'test':\n self.num_samples = num_test_samples\n self.df = pd.read_hdf(self.out_file, 'pair_index/test', mode='r')\n else:\n self.num_samples = num_samples - num_test_samples\n self.df = pd.read_hdf(self.out_file, 'pair_index/train', mode='r')\n assert batch_size <= self.num_samples,\\\n 'Batch size must be <= number of (train or test) samples'\n self.batch_size = batch_size\n\n # shuffle data if shuffle=True\n self.shuffle = shuffle\n self.on_epoch_end()\n\n def __len__(self):\n return int(np.ceil(self.num_samples / self.batch_size))\n\n def __getitem__(self, index):\n # set index range\n # max index is num_samples - 1 for last batch\n min_index = index * self.batch_size\n if (index + 1) * self.batch_size < self.num_samples:\n max_index = (index + 1) * self.batch_size - 1\n else:\n max_index = self.num_samples - 1\n # generate the batch\n data0 = []\n data1 = []\n labels = []\n for pidx in self.indexes[min_index:max_index + 1]:\n df = self.df\n epoch0, epoch1 = self.pair_data_from_hdf5(df.at[pidx, 'species0'],\n df.at[pidx, 'species1'],\n df.at[pidx, 'stage0'],\n df.at[pidx, 'stage1'],\n df.at[pidx, 'index0'],\n df.at[pidx, 'index1'])\n if self.decimate > 1:\n epoch0 = decimate(epoch0, self.decimate)\n epoch1 = decimate(epoch1, self.decimate)\n data0.append(epoch0)\n data1.append(epoch1)\n labels.append(df.at[pidx, 'label'])\n # convert datasets to numpy arrays\n shape = (len(data0), len(data0[0]), 1)\n data0 = np.array(data0).reshape(shape)\n data1 = np.array(data1).reshape(shape)\n labels = np.array(labels, dtype=int)\n\n return [data0, data1], labels\n\n def get_labels(self):\n labels = self.df['label'].tolist()[0:self.num_samples]\n return np.array(labels, dtype=int)\n\n def get_num_samples(self, species, stage):\n datafile = os.path.join(self.file_path,\n self.file_template.format(species))\n with h5py.File(datafile, 'r') as datafile:\n num_epoch_samples = datafile['eeg'][stage].shape[0]\n return num_epoch_samples\n\n def on_epoch_end(self):\n self.indexes = [i for i in range(self.num_samples)]\n if self.shuffle:\n np.random.shuffle(self.indexes)\n\n def pair_data_from_hdf5(self, species0, species1, stage0, stage1, idx0,\n idx1):\n file0 = os.path.join(self.file_path,\n self.file_template.format(species0))\n file1 = os.path.join(self.file_path,\n self.file_template.format(species1))\n data0 = []\n data1 = []\n with h5py.File(file0, 'r') as file0, h5py.File(file1, 'r') as file1:\n data0 = file0['eeg'][stage0][idx0]\n data1 = file1['eeg'][stage1][idx1]\n return data0, data1\n\n def _generate_labeled_pairs(self):\n if os.path.exists(self.out_file):\n os.remove(self.out_file)\n curr_train_index = 0\n curr_test_index = 0\n store = pd.HDFStore(self.out_file, mode='a', complevel=4,\n complib='zlib')\n for type in ['Sham', 'TBI', 'Both']:\n if type == 'Both':\n species_combs = list(product(self.sham_set, self.tbi_set))\n # div_factor is set to 1 since each pair of stages account\n # for one stage combination\n div_factor = 1\n else:\n if type == 'Sham':\n species_set = self.sham_set\n elif type == 'TBI':\n species_set = self.tbi_set\n species_combs = list(combinations(species_set, 2))\n # div_factor is set to 2 for 2 pairs of different stages\n # account for same stage combination in one set case\n div_factor = 2\n stage_perms = list(product(self.stages, self.stages))\n for species0, species1 in species_combs:\n for stage0, stage1 in 
stage_perms:\n num_epoch_samples0 = self.get_num_samples(species0, stage0)\n num_epoch_samples1 = self.get_num_samples(species1, stage1)\n if type == 'Both' or stage0 != stage1:\n num_pair_samples = int(np.ceil(self.num_samples / 2 /\n self.num_diff_pairs /\n div_factor /\n len(species_combs)))\n label = 1\n else:\n num_pair_samples = int(np.ceil(self.num_samples / 2 /\n self.num_same_pairs /\n len(species_combs)))\n label = 0\n temp_count = int(np.ceil(np.sqrt(num_pair_samples)))\n index0 = random.sample(list(range(num_epoch_samples0)),\n temp_count)\n index1 = random.sample(list(range(num_epoch_samples1)),\n temp_count)\n index_pair = random.sample(list(product(index0, index1)),\n num_pair_samples)\n index_pair = [list(t) for t in zip(*index_pair)]\n num_test_pair_samples = int(np.round(self.test_percent *\n num_pair_samples /\n 100))\n num_train_pair_samples = num_pair_samples -\\\n num_test_pair_samples\n df_train_index = list(range(curr_train_index,\n curr_train_index +\n num_train_pair_samples))\n df_test_index = list(range(curr_test_index,\n curr_test_index +\n num_test_pair_samples))\n index0 = index_pair[0][:num_train_pair_samples]\n index1 = index_pair[1][:num_train_pair_samples]\n store.append('pair_index/train',\n pd.DataFrame({'species0': species0,\n 'species1': species1,\n 'stage0': stage0,\n 'stage1': stage1,\n 'index0': index0,\n 'index1': index1,\n 'label': label},\n index=df_train_index),\n data_columns=True,\n min_itemsize={'species0': 7,\n 'species1': 7,\n 'stage0': 5,\n 'stage1': 5})\n curr_train_index += num_train_pair_samples\n index0 = index_pair[0][num_train_pair_samples:]\n index1 = index_pair[1][num_train_pair_samples:]\n store.append('pair_index/test',\n pd.DataFrame({'species0': species0,\n 'species1': species1,\n 'stage0': stage0,\n 'stage1': stage1,\n 'index0': index0,\n 'index1': index1,\n 'label': label},\n index=df_test_index),\n data_columns=True,\n min_itemsize={'species0': 7,\n 'species1': 7,\n 'stage0': 5,\n 'stage1': 5})\n curr_test_index += num_test_pair_samples\n store.close()\n", "sub_path": "pairdatageneratorrs.py", "file_name": "pairdatageneratorrs.py", "file_ext": "py", "file_size_in_byte": 13018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tensorflow.keras", "line_number": 12, "usage_type": "attribute"}, {"api_name": "scipy.signal.decimate", "line_number": 49, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.read_hdf", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.read_hdf", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 114, "usage_type": "call"}, {"api_name": "scipy.signal.decimate", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.signal.decimate", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 164, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.HDFStore", "line_number": 184, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 188, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 197, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 217, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 218, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 220, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 222, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 239, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 256, "usage_type": "call"}]}
+{"seq_id": "334692324", "text": "from flask import Flask, request\nimport redis\nimport json\nfrom mirrulations.docs_filter import process_docs\nfrom mirrulations.doc_filter import process_doc\nfrom mirrulations.redis_manager import RedisManager\nimport logging\nimport io\n\n\nFORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'\nlogging.basicConfig(filename='endpoints_log.log', format=FORMAT)\nd = {'clientip': '192.168.0.1', 'user': 'FLASK'}\nlogger = logging.getLogger('tcpserver')\n\napp = Flask(__name__)\n\nversion = 'v1.3'\n\n\ndef redis_server():\n return RedisManager(redis.Redis())\n\n\n@app.route('/')\ndef default():\n \"\"\"\n Default endpoint\n :return: Returns empty json\n \"\"\"\n logger.debug('Successful API Call: %s', 'default: default endpoint', extra=d)\n return json.dumps({})\n\n\n@app.route('/get_work')\ndef get_work():\n \"\"\"\n Endpoint the user will use to get work from the queue\n client_id will be one of the parameters given for logging purposes\n :return: Returns the json containing the job_id, the type of work to be done, the work that nees to be done, and\n the version number\n \"\"\"\n logging.warning(\"Successful API Call: %s\", 'get_work: get_work', extra=d)\n logger.info('Calling API to get work...')\n if len(request.args) != 1:\n logger.debug('Exception: %s', 'get_work: Get Exception for incorrect number of parameters', extra=d)\n logger.error('Error - number of parameters incorrect')\n return 'Parameter Missing', 400\n logger.debug('Assign Variable: %s', 'get_work: attempting to get client_id', extra=d)\n client_id = request.args.get('client_id')\n logger.debug('Variable Success: %s', 'get_work: successfully retrieved the client id', extra=d)\n if client_id is None:\n logging.warning(\"Exception: %s\", 'get_work: BadParameterException, client id was none', extra=d)\n logger.error('Error - no client ID')\n return 'Bad Parameter', 400\n logger.debug('Assign Variable: %s', 'get_work: attempting to get json_info from get_work - Calling get_work', extra=d)\n json_info = redis_server().get_work()\n logger.debug('Variable Success: %s', 'get_work: successfully retrieved the json_info', extra=d)\n logger.debug('Returning: %s', 'get_work: returning json_info to client from get_work', extra=d)\n logger.info('Work retrieved')\n return json.dumps(json_info)\n\n\n@app.route('/return_docs', methods=['POST'])\ndef return_docs():\n \"\"\"\n The endpoint the client calls to return the document ids received from the regulations docs calls\n :return: Returns a string saying successful so the client knows the call was successful\n \"\"\"\n logger.debug('Successful API Call: %s', 'return_docs: return docs', extra=d)\n logger.info('Attempting to return docs to server...')\n try:\n logger.debug('Assign Variable: %s', 'return_docs: attempting to get json_info from the request', extra=d)\n json_info = request.form['json']\n logger.debug('Variable Success: %s', 'return_docs: successfully retreived json_info', extra=d)\n logger.debug('Assign Variable: %s', 'return_doc: getting the files from the file request field', extra=d)\n files = request.files['file'].read()\n logger.debug('Variable Success: %s', 'return_doc: files successfully retrieved from the return doc post',\n extra=d)\n except:\n logger.debug('Exception: %s', 'return_docs: BadParameterException for return docs', extra=d)\n logger.error('Error - bad parameter')\n return 'Bad Parameter', 400\n if json_info is None:\n logger.debug('Exception: %s', 'return_docs: PostException for return docs', extra=d)\n 
logger.error('Error - could not post docs')\n return 'Bad Parameter', 400\n logger.debug('Calling Function: %s', 'return_docs: return_docs calling process_docs', extra=d)\n files = io.BytesIO(files)\n process_docs(redis_server(), json.loads(json_info), files)\n logger.debug('Function Successful: %s', 'return_docs: process_docs successfully called from return_docs', extra=d)\n logger.debug('Returning: %s', 'return_docs: returning success from return_docs', extra=d)\n logger.info('Docs returned to server')\n return 'Successful!'\n\n\n@app.route('/return_doc', methods=['POST'])\ndef return_doc():\n \"\"\"\n The endpoint the client calls to return documents they received from the individual regulations doc calls\n :return: Returns a string saying successful so the client knows the call was successful\n \"\"\"\n logger.debug('Successful API Call: %s', 'return_doc: return_doc call successful', extra=d)\n logger.info('Attempting to return doc to server...')\n\n try:\n logger.debug('Assign Variable: %s', 'return_doc: getting the files from the file request field', extra=d)\n files = request.files['file'].read()\n logger.debug('Variable Success: %s', 'return_doc: files successfully retrieved from the return doc post', extra=d)\n logger.debug('Assign Variable: %s', 'return_doc: get the json_info from the post request', extra=d)\n json_info= request.form['json']\n logger.debug('Variable Success: %s', 'return_doc: json retrieved from the doc post call', extra=d)\n except:\n logger.debug('Exception: %s', 'return_doc: BadParameterException for return_doc', extra=d)\n logger.error('Error - bad parameter')\n return 'Bad Parameter', 400\n files = io.BytesIO(files)\n logger.debug('Exception: %s', 'return_doc: BadParameterException for return_doc', extra=d)\n logger.debug('Calling Function: %s', 'return_doc: call process_docs with the json and files posted to return_doc endpoint', extra=d)\n process_doc(redis_server(), json.loads(json_info), files)\n logger.debug('Function Successful: %s', 'return_doc: success from return_doc', extra=d)\n logger.debug('Returning: %s', 'return_doc: returning success from return_doc', extra=d)\n logger.info('Doc returned to server')\n return 'Successful!'\n\n\ndef generate_json(work_list):\n \"\"\"\n Given a list of values, the list will be converted into json format\n :param work_list: The list of values that will be converted into json\n :return: Returns the json formatted list\n \"\"\"\n logger.info('Converting into JSON...')\n logger.debug('Call Successful: %s', 'generate_json: generate_json called successfully', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign job_id from the work_list', extra=d)\n job_id = work_list[0]\n logger.debug('Variable Success: %s', 'generate_json: jod_id assigned', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign type from the work_list', extra=d)\n type = work_list[1]\n logger.debug('Variable Success: %s', 'generate_json: type assigned', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign data from the work_list', extra=d)\n data = work_list[2]\n logger.debug('Variable Success: %s', 'generate_json: data assigned', extra=d)\n logger.debug('Assign Variable: %s', 'generate_json: assign converted_json from the combination of job_id, type, and data', extra=d)\n converted_json = {\n \"job_id\": job_id,\n \"type\": type,\n \"data\": data,\n \"version\": version\n }\n logger.debug('Variable Success: %s', 'generate_json: converted_json created', extra=d)\n logger.debug(\"Returning: %s\", 
'generate_json: returning converted_json', extra=d)\n logger.info('JSON conversion successful')\n return json.dumps(converted_json)\n\n\ndef run():\n app.run('0.0.0.0', '8080')\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "src/mirrulations/endpoints.py", "file_name": "endpoints.py", "file_ext": "py", "file_size_in_byte": 7555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "mirrulations.redis_manager.RedisManager", "line_number": 22, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 53, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 89, "usage_type": "call"}, {"api_name": "mirrulations.docs_filter.process_docs", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 117, "usage_type": "call"}, {"api_name": "mirrulations.doc_filter.process_doc", "line_number": 120, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 120, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}]}
+{"seq_id": "150199566", "text": "import cv2\n\n\ndef main():\n image1 = cv2.imread(\"./images/X2.png\", 0)\n _, threshould = cv2.threshold(image1, 0, 255, cv2.THRESH_OTSU)\n write_name = './images/X_thr.png'\n cv2.imwrite(write_name, threshould)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "Blavo/image_make.py", "file_name": "image_make.py", "file_ext": "py", "file_size_in_byte": 256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "69459559", "text": "import pygame\nfrom pygame.sprite import Sprite\n\nclass Guai(Sprite):\n def __init__(self,screen,ai_settings):\n super().__init__()\n self.screen=screen\n self.ai_settings=ai_settings\n\n self.image=pygame.image.load('dahong.png')\n #创建矩形\n self.rect=self.image.get_rect()\n \n #需要初始位置\n self.rect.left=float(0.5*self.rect.width)\n self.rect.top=float(self.rect.height)\n\n #需要初始方向,class调用可Settings,这样function不再调用Settings\n self.guai_direction=ai_settings.guai_direction\n\n #检查撞墙\n def check_edge(self):\n screen_rect=self.screen.get_rect()#必须重新创建,然后存储到一个对象中\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= 0:\n return True\n\n #横向移动函数,包含方向信息\n def update(self):\n self.rect.x += self.ai_settings.guai_speed_factor*self.guai_direction\n \n\n #重新绘制怪\n def blitme(self):\n self.screen.blit(self.image,self.rect)", "sub_path": "class_Guai/class_Guai.py", "file_name": "class_Guai.py", "file_ext": "py", "file_size_in_byte": 1107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 4, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}]}
+{"seq_id": "289486105", "text": "# Import socket module\nimport socket\nimport sys\nimport numpy as np\nimport time\nimport threading\n\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\n\nimport myTools.cnn_model as cnn\n\nflags.DEFINE_string('ip', '127.0.0.1', 'default ip')\nflags.DEFINE_integer('port', 12346, 'default port')\n\nnp.set_printoptions(threshold=sys.maxsize)\n\ninfo = []\n\n\ndef parse_message_received(data):\n parse = data.split(\";\")\n return parse\n\n\ndef get_img(data):\n tmp_data = data.split(\" \")\n print(tmp_data[1:-1])\n return\n\n\ndef main(_argv):\n logging.info('load cat modem')\n\n dog_1 = time.time()\n dog_model = cnn.get_inception_v2_cat()\n dog_2 = time.time()\n logging.info('cat model load in {:.2f}ms'.format((dog_2 - dog_1)))\n\n logging.info('Initialization connection at {}:{}'.format(FLAGS.ip, FLAGS.port))\n time_c_1 = time.time()\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((FLAGS.ip, FLAGS.port))\n time_c_2 = time.time()\n logging.info('Connected to {}:{} in {:.3f}ms'.format(FLAGS.ip, FLAGS.port, (time_c_2 - time_c_1)))\n\n message = \"0;RASP;127.0.0.1;CAT;\"\n send_1 = time.time()\n s.send(message.encode('utf-8'))\n send_2 = time.time()\n logging.info('Send identification message {} in {}ms'.format(message, (send_2 - send_1)))\n\n while True:\n\n try:\n data = s.recv(3000000)\n message_parsed = parse_message_received(data.decode('utf-8'))\n logging.info(\n 'message {} size : {:.2f}Mb received'.format(message_parsed[0], (sys.getsizeof(data) / 1000000)))\n if len(message_parsed) == 4:\n logging.info('\\t image {}'.format(message_parsed[0]))\n img = eval('np.array(' + message_parsed[2] + ')')\n process = ThreadCAT(message_parsed[0], img, dog_model, s)\n process.start()\n if data.decode('utf-8') == \"close\":\n break\n except (ConnectionResetError, ConnectionRefusedError):\n logging.info(\"Server close the connexion or not online\")\n break\n # close the connection\n s.close()\n\n\nclass ThreadCAT(threading.Thread):\n def __init__(self, id_img, img, model, s):\n threading.Thread.__init__(self)\n self.id_img = id_img\n self.img = img\n self.model = model\n self.s = s\n\n def run(self):\n cnn.thread_for_cnn(self.id_img, self.img, self.model, self.s)\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n", "sub_path": "TCPRasp.py", "file_name": "TCPRasp.py", "file_ext": "py", "file_size_in_byte": 2533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "absl.flags.DEFINE_string", "line_number": 13, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 13, "usage_type": "name"}, {"api_name": "absl.flags.DEFINE_integer", "line_number": 14, "usage_type": "call"}, {"api_name": "absl.flags", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.set_printoptions", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 16, "usage_type": "attribute"}, {"api_name": "absl.logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 33, "usage_type": "name"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "myTools.cnn_model.get_inception_v2_cat", "line_number": 36, "usage_type": "call"}, {"api_name": "myTools.cnn_model", "line_number": 36, "usage_type": "name"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 38, "usage_type": "call"}, 
{"api_name": "absl.logging", "line_number": 38, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 40, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.ip", "line_number": 40, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 40, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.port", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 42, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 42, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 42, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS.ip", "line_number": 43, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 43, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.port", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 45, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.ip", "line_number": 45, "usage_type": "attribute"}, {"api_name": "absl.flags.FLAGS", "line_number": 45, "usage_type": "name"}, {"api_name": "absl.flags.FLAGS.port", "line_number": 45, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 51, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 51, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 58, "usage_type": "name"}, {"api_name": "sys.getsizeof", "line_number": 59, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 61, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 68, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 74, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 76, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 76, "usage_type": "attribute"}, {"api_name": "myTools.cnn_model.thread_for_cnn", "line_number": 83, "usage_type": "call"}, {"api_name": "myTools.cnn_model", "line_number": 83, "usage_type": "name"}, {"api_name": "absl.app.run", "line_number": 88, "usage_type": "call"}, {"api_name": "absl.app", "line_number": 88, "usage_type": "name"}]}
+{"seq_id": "223910703", "text": "import time\nfrom selenium.common.exceptions import NoSuchElementException\nfrom Automate_Tests_Content.Web.Selenuim.utils.full_regression_utility import FullRegressionUtility\nfrom Automate_Tests_Content.Web.Selenuim.logger import Logger\n\n__author__ = \"Chulud Mallak\"\n__copyright__ = \"Utility\"\n__credits__ = [\"Chulud Mallak\"]\n__version__ = \"1.0.0\"\n__maintainer__ = \"Yaacov Pinhas\"\n__email__ = \"chuludx.mallak@intel.com\"\n__status__ = \"Production\"\n\n\nclass InfoButtonUtility:\n logger = Logger()\n __full_regression_utility = FullRegressionUtility()\n\n def __init__(self):\n pass\n\n def display_Search_Box(self, column_name, num, txt, elementid, browser):\n try:\n text_box = None\n div = browser.find_element_by_id(elementid)\n rows = div.find_elements_by_tag_name(\"tr\")\n td = rows[1].find_elements_by_tag_name(\"td\")\n for i in td:\n if i.text == column_name:\n self.logger.Info(\"Displays rows that contain a name that contains {} characters in {} coulmn \\n\".\n format(txt, column_name))\n time.sleep(1)\n first_table = browser.find_element_by_class_name(\"obj\")\n div = browser.find_elements_by_class_name(\"filter\")\n if column_name != \"Author\":\n text_box = div[num - 1].find_element_by_tag_name(\"input\")\n time.sleep(1)\n text_box.clear()\n text_box.send_keys(txt)\n time.sleep(7)\n else:\n select_box = div[num - 1].find_element_by_tag_name(\"select\")\n for option in select_box.find_elements_by_tag_name(\"option\"):\n if option.text == txt:\n option.click()\n break\n rows = first_table.find_elements_by_tag_name(\"tr\")\n rows.pop(0)\n time.sleep(1)\n if text_box is not None:\n text_box.clear()\n time.sleep(5)\n flag = 0\n for tr in rows:\n td = tr.find_elements_by_tag_name(\"td\")\n if td[num - 1].text.find(txt) == -1:\n flag = 1\n if flag == 1:\n self.logger.Info(\"sorting by type letters not succeeded...\\n\")\n else:\n print (\"sorting by type letters succeeded...\\n\")\n break\n finally:\n pass\n\n def Cont_Scroll_Off(self, browser):\n try:\n cont_scroll = browser.find_element_by_id(\"InfiniteScrollModeButton\")\n cont_scroll.click()\n time.sleep(2)\n files_list = browser.find_elements_by_class_name(\"InfiniteScrollPage\")\n if len(files_list) == 1:\n self.logger.Info(\"cont_scroll OFF\\n\")\n except Exception as e:\n self.logger.Error(str(e))\n\n def display_table_details(self, elementid, titles, tabname, browser):\n try:\n table = browser.find_element_by_id(elementid)\n rows = table.find_elements_by_tag_name(\"tr\")\n flag = 1\n td = rows[1].find_elements_by_tag_name(\"td\")\n for i in range(4):\n if td[i].text != titles[i]:\n flag = 0\n break\n if flag == 1:\n colmns = rows[2].find_elements_by_tag_name(\"td\")\n compare_putton = colmns[len(colmns)-1]\n if compare_putton is not None:\n self.logger.Info(\"Display Table of {} details succeeded...\\n\".format(tabname))\n except Exception as e:\n self.logger.Error(str(e))\n\n def Close_Tab(self, elementid, tabname, browser):\n try:\n close_button = browser.find_element_by_class_name(\"CloseInfoIcon\")\n time.sleep(4)\n close_button.click()\n time.sleep(6)\n try:\n browser.find_element_by_id(elementid)\n except NoSuchElementException:\n self.logger.Info(\"Close {} Tab succeeded...\\n\".format(tabname))\n except Exception as e:\n self.logger.Error(str(e))\n\n def get_started(self, tabname, browser):\n try:\n self.__full_regression_utility.cancel_edit_mode(browser)\n self.__full_regression_utility.switch_browser_to_frame(\"ContentOuterIFrame\", browser)\n 
self.Cont_Scroll_Off(browser)\n self.__full_regression_utility.click_on_info_button(browser)\n self.__full_regression_utility.click_option_menu_button(tabname, browser) # open References Tab\n except Exception as e:\n self.logger.Error(str(e))\n", "sub_path": "Automate_Tests_Content/Web/Selenuim/FullRegression/InfoButtonMenu/info_button_utility.py", "file_name": "info_button_utility.py", "file_ext": "py", "file_size_in_byte": 4930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "Automate_Tests_Content.Web.Selenuim.logger.Logger", "line_number": 16, "usage_type": "call"}, {"api_name": "Automate_Tests_Content.Web.Selenuim.utils.full_regression_utility.FullRegressionUtility", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 103, "usage_type": "name"}]}
+{"seq_id": "612184118", "text": "import torch.nn as nn\nimport torch\nimport numpy as np\nimport math\n\n\n\n\ndef _fast_hist(true, pred, num_classes):\n pred = np.round(pred).astype(int)\n true = np.round(true).astype(int)\n mask = (true >= 0) & (true < num_classes)\n hist = np.bincount(\n num_classes * true[mask] + pred[mask],\n minlength=num_classes ** 2,\n ).reshape(num_classes, num_classes).astype(np.float)\n return hist\n\ndef jaccard_index(hist):\n \"\"\"Computes the Jaccard index, a.k.a the Intersection over Union (IoU).\n Args:\n hist: confusion matrix.\n Returns:\n avg_jacc: the average per-class jaccard index.\n \"\"\"\n A_inter_B = np.diag(hist)\n A = np.sum(hist,axis=1)\n B = np.sum(hist,axis=0)\n jaccard = A_inter_B / (A + B - A_inter_B + 1e-6)\n avg_jacc =np.nanmean(jaccard) #the mean of jaccard without NaNs\n return avg_jacc, jaccard\n\ndef dice_coef_metric(hist):\n \"\"\"Computes the dice coefficient).\n Args:\n hist: confusion matrix.\n Returns:\n avg_dice: the average per-class dice coefficient.\n \"\"\"\n A_inter_B = np.diag(hist)\n A = np.sum(hist,axis=1)\n B = np.sum(hist,axis=0)\n dsc = A_inter_B * 2 / (A + B + 1e-6)\n avg_dsc=np.nanmean(dsc) #the mean of dsc without NaNs\n return avg_dsc\n\ndef dice_coef(inputs, target):\n intersection = 2.0 * (target * inputs).sum()+1e-4\n sum_ = target.sum() + inputs.sum()+1e-4\n\n return intersection / sum_\n\ndef dice_coef_loss(y_pred, y_true):\n smooth=1.0\n assert y_pred.size() == y_true.size()\n intersection = (y_pred * y_true).sum()\n dsc = (2. * intersection + smooth) / (\n y_pred.sum() + y_true.sum() + smooth\n )\n return 1. - dsc\n\n\ndef bce_dice_loss(y_pred, y_true):\n dicescore = dice_coef_loss(y_pred, y_true)\n log_cosh_dice=math.log(math.cosh(dicescore))\n bcescore = nn.BCELoss()\n m = nn.Sigmoid()\n bceloss = bcescore(m(y_pred), y_true)+log_cosh_dice\n return (bceloss)\n", "sub_path": "loss.py", "file_name": "loss.py", "file_ext": "py", "file_size_in_byte": 1949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.round", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 44, "usage_type": "call"}, {"api_name": "math.log", "line_number": 65, "usage_type": "call"}, {"api_name": "math.cosh", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}]}
+{"seq_id": "429018511", "text": "# Import library for keyboard, mouse inputs and display\r\nimport sys\r\n\r\nimport pygame\r\n\r\n# Initializes library pygame and pygames font\r\npygame.init()\r\npygame.font.init()\r\n\r\n# Defines colours used\r\nred = (218, 86, 72)\r\ngreen = (66, 151, 71)\r\nblue = (65, 80, 214)\r\n\r\n# Defines the size of the window\r\nscreenWidth = 1200\r\nscreenHeigh = 800\r\n\r\n# Defines how many bricks there will be.\r\ncols = 6\r\nrows = 6\r\n\r\n# Game variables\r\nplaying = False\r\nmainMenu = True\r\ncontrolSelect = False\r\ngameOver = 0\r\nscore = 0\r\nuseMouse = None\r\n\r\n\r\n# Creates the Bricks\r\nclass Bricks:\r\n def __init__(self):\r\n # Sets the width and height of each brick\r\n self.width = screenWidth // cols\r\n self.height = 50\r\n\r\n # Stores each brick\r\n self.bricks = []\r\n\r\n # Stores bricks in the row\r\n self.eachBrick = []\r\n\r\n # Creates the bricks\r\n def createBricks(self):\r\n # For each row\r\n for row in range(rows):\r\n # Stores each row\r\n brickRow = []\r\n # For each collum\r\n for col in range(cols):\r\n # Sets the X and Y value of the top left of each brick\r\n brickX = col * self.width\r\n brickY = row * self.height\r\n # Draws the bricks\r\n Rect = pygame.Rect(brickX, brickY, self.width, self.height)\r\n # Checks if the row is of the first third of the rows\r\n if row < rows // 3:\r\n # Sets the brick heath to 3\r\n health = 3\r\n # Checks if the row is of the first third of the rows\r\n elif row < (rows // 3) * 2:\r\n # Sets the brick heath to 2\r\n health = 2\r\n # Checks if the row is of the first third of the rows\r\n elif row < (rows // 3) * 3:\r\n # Sets the brick heath to 1\r\n health = 1\r\n # Stores the brick and health in a list\r\n eachBrick = [Rect, health]\r\n # Adds the brick and brick health to the row\r\n brickRow.append(eachBrick)\r\n # Adds the row to the bricks list\r\n self.bricks.append(brickRow)\r\n\r\n # Shows the bricks\r\n def showBricks(self):\r\n # For each row in the bricks list\r\n for row in self.bricks:\r\n # For each brick in the row\r\n for brick in row:\r\n # Checks the heath of the bricks\r\n if brick[1] == 3:\r\n # Sets the brick colour\r\n brickColour = red\r\n # Checks the heath of the bricks\r\n elif brick[1] == 2:\r\n # Sets the brick colour\r\n brickColour = green\r\n # Checks the heath of the bricks\r\n elif brick[1] == 1:\r\n # Sets the brick colour\r\n brickColour = blue\r\n # Draws the bricks\r\n pygame.draw.rect(canvas, brickColour, brick[0])\r\n # Draws the bricks border\r\n pygame.draw.rect(canvas, (0, 0, 0), (brick[0]), 1)\r\n\r\n\r\n# Creates Paddle\r\nclass Paddle:\r\n def __init__(self):\r\n # Sets the width and height of the paddle\r\n self.height = 20\r\n self.width = screenWidth // cols\r\n\r\n # Sets the x and y position of the paddle\r\n self.x = (screenWidth // 2) - (self.width // 2)\r\n self.y = screenHeigh - (self.height * 2)\r\n\r\n # Defines the rectangle\r\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\r\n\r\n # Sets the direction and speed of the brick\r\n self.direction = 0\r\n self.speed = 10\r\n\r\n # Saves the old mouse x pos for use later\r\n self.oldMouseX = 0\r\n\r\n # Allows you to move the paddle with your keyboard\r\n def keyboardmove(self):\r\n # Resets the saved direction of the paddle\r\n self.direction = 0\r\n\r\n # Sets pressed key in a variable\r\n key = pygame.key.get_pressed()\r\n\r\n # Checks if you push the left arrow key\r\n if key[pygame.K_LEFT] and self.rect.left > 0:\r\n # Moves the paddle\r\n self.rect.x -= 
self.speed\r\n            # Sets the direction of the paddle\r\n            self.direction = -1\r\n\r\n        # Checks if you push the right arrow key\r\n        if key[pygame.K_RIGHT] and self.rect.right < screenWidth:\r\n            # Moves the paddle\r\n            self.rect.x += self.speed\r\n\r\n            # Sets the direction of the paddle\r\n            self.direction = 1\r\n\r\n    # Allows you to move the paddle with your mouse\r\n    def mouseMove(self):\r\n        # Resets the saved direction of the paddle\r\n        self.direction = 0\r\n        # Gets the mouse pos tuple and convert it to the variables mouseX and mouseY\r\n        mouseX, mouseY = pygame.mouse.get_pos()\r\n        # Check if the mouseX position is greater than the old mouseX position\r\n        if mouseX > self.oldMouseX:\r\n            # Sets the direction to right\r\n            self.direction = 1\r\n        # Check if the mouseX position is less than the old mouseX position\r\n        if mouseX < self.oldMouseX:\r\n            # Sets the direction to left\r\n            self.direction = -1\r\n        # Move the paddle to the mouseX position\r\n        self.rect.x = mouseX - self.rect.width / 2\r\n        # Save the old mouseX\r\n        self.oldMouseX = mouseX\r\n\r\n    # Draws the paddle\r\n    def show(self):\r\n        pygame.draw.rect(canvas, red, self.rect)\r\n\r\n\r\n# Creates the ball\r\nclass Ball:\r\n    # When Ball is created it requires an x and y value\r\n    def __init__(self, x, y):\r\n        # Sets the ball's radius\r\n        self.radius = 10\r\n        # Sets the X and Y coordinates of the ball\r\n        self.x = x - self.radius\r\n        self.y = y\r\n        # Defines the ball's hit-box\r\n        self.rect = pygame.Rect(self.x, self.y, self.radius * 2, self.radius * 2)\r\n        # Sets the speed and the max speed of the ball.\r\n        self.speedX = 4\r\n        self.speedY = -4\r\n        self.maxSpeed = 5\r\n        # Sets how a game over happened\r\n        self.gameOver = 0\r\n\r\n    # Moves the ball\r\n    def move(self):\r\n        # Allows the score to be set\r\n        global score\r\n        # Defines the hit threshold\r\n        threshold = 5\r\n        # Resets the bricksDestroyed variable\r\n        bricksDestroyed = True\r\n        # Creates the row counter\r\n        rowC = 0\r\n        # For each row in bricks list\r\n        for row in bricks.bricks:\r\n            # Creates the item counter\r\n            itemC = 0\r\n            # For each item in the row\r\n            for item in row:\r\n                # Checks if the ball collides with a brick\r\n                if self.rect.colliderect(item[0]):\r\n                    # Checks if the ball collides with the top of the brick\r\n                    if abs(self.rect.bottom - item[0].top) < threshold and self.speedY > 0:\r\n                        # Moves the ball in the opposite y direction\r\n                        self.speedY *= -1\r\n                    # Checks if the ball collides with the bottom of the brick\r\n                    if abs(self.rect.top - item[0].bottom) < threshold and self.speedY < 0:\r\n                        # Moves the ball in the opposite y direction\r\n                        self.speedY *= -1\r\n                    # Checks if the ball collides with the left of the brick\r\n                    if abs(self.rect.right - item[0].left) < threshold and self.speedX > 0:\r\n                        # Moves the ball in the opposite x direction\r\n                        self.speedX *= -1\r\n                    # Checks if the ball collides with the right of the brick\r\n                    if abs(self.rect.left - item[0].right) < threshold and self.speedX < 0:\r\n                        # Moves the ball in the opposite x direction\r\n                        self.speedX *= -1\r\n                    # adds 10 to the score\r\n                    score += 10\r\n                    # Checks if the health of the brick is greater than 1\r\n                    if bricks.bricks[rowC][itemC][1] > 1:\r\n                        # Lowers the health of the bricks\r\n                        bricks.bricks[rowC][itemC][1] -= 1\r\n                    # If the health is not greater than 1\r\n                    else:\r\n                        # Make the brick invisible and moves it off screen.\r\n                        bricks.bricks[rowC][itemC][0] = (-100, -100, 0, 0)\r\n\r\n                # Checks if there are bricks left\r\n                if bricks.bricks[rowC][itemC][0] != (-100, -100, 0, 0):\r\n                    # Sets bricks destroyed to be 
false\r\n bricksDestroyed = False\r\n # Adds 1 to the item Counter\r\n itemC += 1\r\n # Adds 1 to the row counter\r\n rowC += 1\r\n\r\n # Checks if all bricks are destroyed\r\n if bricksDestroyed:\r\n # Sets gameOver\r\n self.gameOver = 1\r\n # Checks if the ball collide with the left or right side of the screen\r\n if self.rect.left < 0 or self.rect.right > screenWidth:\r\n # Reverse the balls x direction\r\n self.speedX *= -1\r\n # Checks if the ball collide with the top of the screen\r\n if self.rect.top < 0:\r\n # Reverse the balls Y direction\r\n self.speedY *= -1\r\n # Checks if the ball collide with the bottom of the screen\r\n if self.rect.bottom > screenHeigh:\r\n # Sets gameOver to state that you lost\r\n self.gameOver = -1\r\n\r\n # Checks if the ball collides with the paddle\r\n if self.rect.colliderect(paddle):\r\n # Checks if the paddle collides with the top of the paddle\r\n if abs(self.rect.bottom - paddle.rect.top) < threshold and self.speedY > 0:\r\n # Reverse the y direction\r\n self.speedY *= -1\r\n # Increases the X speed of the ball\r\n self.speedX += paddle.direction\r\n # Checks if the X speed of the ball is greater then the max speed\r\n if self.speedX > self.maxSpeed:\r\n # Sets the X speed of the ball to the max speed\r\n self.speedX = self.maxSpeed\r\n # Checks if the x speed is less then 0 and if the x speed is less than negative the max speed\r\n elif self.speedX < 0 and self.speedX < -self.maxSpeed:\r\n # Sets the X speed to the negative of the max speed\r\n self.speedX = -self.maxSpeed\r\n # Checks if it does not collide with the top of the paddle\r\n else:\r\n # Reverse the x speed\r\n self.speedX *= -1\r\n\r\n # Moves the ball the amount of the current ball speed\r\n self.rect.x += self.speedX\r\n self.rect.y += self.speedY\r\n\r\n # Returns the game over state\r\n return self.gameOver\r\n\r\n # Lets you draw the ball\r\n def show(self):\r\n # Draws the ball\r\n pygame.draw.circle(canvas, red, (self.rect.x + self.radius, self.rect.y + self.radius), self.radius)\r\n\r\n\r\n# Creates the menu\r\nclass Menus:\r\n def __init__(self):\r\n # Sets the title font\r\n self.titleF = pygame.font.Font(\"ARIAL.TTF\", 100)\r\n # Sets the rest if the buttons font\r\n self.font = pygame.font.Font(\"ARIAL.TTF\", 45)\r\n # Creates a rectangle the size of the each label to check if it gets clicked in the game loop\r\n self.play = pygame.Rect(screenWidth / 2 - 88 / 2, screenHeigh - 100, 88, 51)\r\n self.mouse = pygame.Rect(screenWidth / 2 - 135 / 2, screenHeigh - 250, 135, 51)\r\n self.keyboard = pygame.Rect(screenWidth / 2 - 198 / 2, screenHeigh - 150, 198, 51)\r\n self.keepPlaying = pygame.Rect(screenWidth / 2 - 181 / 2, screenHeigh - 250, 181, 51)\r\n self.toMenu = pygame.Rect(screenWidth / 2 - 277 / 2, screenHeigh - 150, 277, 51)\r\n\r\n # Shows the Main Menu\r\n def Main(self):\r\n # Shows the title\r\n title = self.titleF.render(\"Breakout\", False, red)\r\n canvas.blit(title, (screenWidth / 2 - title.get_rect().width / 2, 25))\r\n # Shows the play button\r\n play = self.font.render(\"Play\", False, green)\r\n canvas.blit(play, (self.play.x, self.play.y))\r\n\r\n # Shows the controls menu\r\n def Controls(self):\r\n # Shows the title\r\n title = self.titleF.render(\"Controller select\", False, blue)\r\n canvas.blit(title, (screenWidth / 2 - title.get_rect().width / 2, 25))\r\n # Shows the mouse button\r\n mouse = self.font.render(\"Mouse\", False, green)\r\n canvas.blit(mouse, (self.mouse.x, self.mouse.y))\r\n # Shows the keyboard button\r\n keyboard = 
self.font.render(\"KeyBoard\", False, green)\r\n canvas.blit(keyboard, (self.keyboard.x, self.keyboard.y))\r\n\r\n # Shows the win menu\r\n def Win(self):\r\n # Shows the title\r\n title = self.titleF.render(\"You Win!\", False, green)\r\n canvas.blit(title, (screenWidth / 2 - title.get_rect().width / 2, 25))\r\n # Shows the continue button\r\n keepPlaying = self.font.render(\"Continue\", False, blue)\r\n canvas.blit(keepPlaying, (self.keepPlaying.x, self.keepPlaying.y))\r\n # Shows the back to menu button\r\n toMenu = self.font.render(\"Back to menu\", False, blue)\r\n canvas.blit(toMenu, (self.toMenu.x, self.toMenu.y))\r\n\r\n\r\n# Initializes the bricks, the paddle, the ball, and the menus\r\nbricks = Bricks()\r\nbricks.createBricks()\r\npaddle = Paddle()\r\nball = Ball(paddle.x + (paddle.width // 2), paddle.y - paddle.height)\r\nmenu = Menus()\r\n\r\n# Sets the font and the font size\r\nfont = pygame.font.Font(\"ErbosDraco1StOpenNbpRegular-l5wX.ttf\", 30)\r\n\r\n# Creates the window and sets a display name.\r\ncanvas = pygame.display.set_mode((screenWidth, screenHeigh))\r\npygame.display.set_caption(\"Creative Task 2\")\r\n\r\n# Sets the running state to be true\r\nrun = True\r\n# While run is true or the main game loop\r\nwhile run:\r\n # Sets the background colour to be black\r\n canvas.fill((35, 37, 39))\r\n # print(pygame.mouse.get_pos())\r\n # Checks if you are in the main menu\r\n if mainMenu:\r\n menu.Main()\r\n if controlSelect:\r\n menu.Controls()\r\n # Checks if you are playing\r\n if playing:\r\n # Shows the bricks\r\n bricks.showBricks()\r\n # Shows the paddle\r\n paddle.show()\r\n # shows the ball\r\n ball.show()\r\n # Shows the score\r\n label = font.render(str(score), False, (255, 255, 255))\r\n canvas.blit(label, (12, screenHeigh - 42))\r\n # Allows the ball to move and gets the gameOver variable from ball.move\r\n gameOver = ball.move()\r\n # Checks if you are using the mouse\r\n if useMouse:\r\n # Allows the paddle to be moved by your mouse\r\n paddle.mouseMove()\r\n # If you aren't using your mouse\r\n else:\r\n # Allows the paddle to be moved by your keyboard\r\n paddle.keyboardmove()\r\n\r\n # Checks if you won\r\n if gameOver == 1:\r\n # Ends the game\r\n playing = False\r\n # Open the win menu\r\n menu.Win()\r\n # Checks if you lost\r\n if gameOver == -1:\r\n # Ends the game\r\n playing = False\r\n # Gets the mouse position\r\n pos = pygame.mouse.get_pos()\r\n # For each event in pygame\r\n for e in pygame.event.get():\r\n # Checks if the event is a quit\r\n if e.type == pygame.QUIT:\r\n # Stops the game loop\r\n run = False\r\n # Checks if you click your mouse\r\n if e.type == pygame.MOUSEBUTTONDOWN:\r\n # Checks if you click the play button and you are in the main menu\r\n if menu.play.collidepoint(pos) and mainMenu:\r\n # Sets that you are in the control select menu\r\n controlSelect = True\r\n # Sets that you are not in the main menu\r\n mainMenu = False\r\n\r\n # Checks if you click the mouse button and you are in the controller select menu\r\n if menu.mouse.collidepoint(pos) and controlSelect:\r\n # Takes you out of the controller select menu\r\n controlSelect = False\r\n # Takes you in to the game\r\n playing = True\r\n # Sets that you are using mouse controls\r\n useMouse = True\r\n # Checks if you click the keyboard button and you are in the controller select menu\r\n if menu.keyboard.collidepoint(pos) and controlSelect:\r\n # Takes you out of the controller select menu\r\n controlSelect = False\r\n # Takes you in to the game\r\n playing = True\r\n # 
Sets that you are not using mouse controls\r\n useMouse = False\r\n # Checks if you click Continue or Reset\r\n if menu.keepPlaying.collidepoint(pos):\r\n # Checks if you won\r\n if gameOver == 1:\r\n # Moves the ball the its starting position\r\n ball.rect.x = ball.x\r\n ball.rect.y = ball.y\r\n # Starts playing the game\r\n playing = True\r\n # Sets game to not be over\r\n gameOver = 0\r\n ball.gameOver = 0\r\n # Deletes old bricks\r\n bricks.bricks.clear()\r\n # Creates new bricks\r\n bricks.createBricks()\r\n # Checks if you click back to menu and if gameOver is not 1\r\n if menu.toMenu.collidepoint(pos) and gameOver != 0:\r\n # Moves the ball the its starting position\r\n ball.rect.x = ball.x\r\n ball.rect.y = ball.y\r\n # Bring you back to the menu\r\n mainMenu = True\r\n # Sets game to not be over\r\n gameOver = 0\r\n ball.gameOver = 0\r\n # Resets the score\r\n score = 0\r\n # Deletes old bricks\r\n bricks.bricks.clear()\r\n # Creates new bricks\r\n bricks.createBricks()\r\n\r\n # Updates the canvas\r\n pygame.display.update()\r\n\r\n # Limits the frames per second to 60\r\n pygame.time.Clock().tick(60)\r\n\r\n# Exits the game\r\npygame.QUIT\r\n# End of program.\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 18335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 289, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 296, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 296, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 300, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 301, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 302, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 303, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 304, "usage_type": 
"call"}, {"api_name": "pygame.font.Font", "line_number": 348, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 348, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 351, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 351, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 352, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 352, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 399, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 399, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 401, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 401, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 403, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 407, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 465, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 465, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 468, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 468, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 471, "usage_type": "attribute"}]}
+{"seq_id": "237632004", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport os\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\n#from sklearn.feature_extraction.text import TfidfTransformer\n\nimport nltk\nfrom nltk.stem.porter import PorterStemmer\n\n\n# In[2]:\n\n\ncount = CountVectorizer() #from sklearn.feature_extraction.text import CountVectorizer\ndocs = np.array([\n 'The sun is shining',\n 'The weather is sweet',\n 'The sun is shining, the weather is sweet, and one and one is two'])\nbag = count.fit_transform(docs)\n\n\n# In[3]:\n\n\nprint(count.vocabulary_) # vocabulary_ attribute of CountVectorizer() shows a mapping of terms to feature indices.\n\n\n# In[4]:\n\n\nprint(bag.toarray())\n\n\n# In[5]:\n\n\ncount_2 = CountVectorizer(ngram_range=(1,2))\nbag_2 = count_2.fit_transform(docs)\nprint(count_2.vocabulary_)\nprint(bag_2.toarray())\n\n\n# In[6]:\n\n\nnp.set_printoptions(precision=2) # These options determine the way floating point numbers are displayed.\n\n\n# In[7]:\n\n\ntfidf = TfidfTransformer(use_idf=True, \n norm='l2', \n smooth_idf=True)\nprint(tfidf.fit_transform(count.fit_transform(docs))\n .toarray())\n\n\n# In[8]:\n\n\ntf_is = 3 # suppose term \"is\" has a frequency of 3\nn_docs = 3\nidf_is = np.log((n_docs+1) / (3+1))\ntfidf_is = tf_is * (idf_is + 1)\nprint('tf-idf of term \"is\" = %.2f' % tfidf_is)\n\n\n# In[9]:\n\n\ntfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True)\nraw_tfidf = tfidf.fit_transform(count.fit_transform(docs)).toarray()[-1]\nraw_tfidf \n\n\n# In[10]:\n\n\nl2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2))\nl2_tfidf\n\n\n# In[11]:\n\n\ncorpus = [\n 'This is the first document.',\n 'This document is the second document.',\n 'And this is the third one.',\n 'Is this the first document?',\n ]\n\nvectorizer = TfidfVectorizer()\nX = vectorizer.fit_transform(corpus)\nprint(vectorizer.get_feature_names())\n\nprint(X.shape)\n\n\n# In[12]:\n\n\nvectorizer_123 = TfidfVectorizer(ngram_range=(1,3))\nX_123 = vectorizer_123.fit_transform(corpus)\nprint(vectorizer_123.get_feature_names())\n\nprint(X_123.shape)\n\n\n# In[13]:\n\n\nvectorizer_mm = TfidfVectorizer(max_df=1.0,min_df=0.5)\nX_mm = vectorizer_mm.fit_transform(corpus)\nprint(vectorizer_mm.get_feature_names())\n\nprint(X_mm.shape)\n\n\n# In[14]:\n\n\ndf = pd.read_csv('movie_data_cat.csv', encoding='utf-8')\ndf.head(10)\n\n\n# In[15]:\n\n\ndf.shape\ndf.columns\n\n\n# In[16]:\n\n\nclass_mapping = {label:idx for idx,label in enumerate(np.unique(df['sentiment']))}\n\nprint(class_mapping)\n\n#use the mapping dictionary to transform the class labels into integers\n\ndf['sentiment'] = df['sentiment'].map(class_mapping)\ndf.head(10)\n\n\n# In[17]:\n\n\ndf.loc[5635, 'review']#[-50:]\n\n\n# In[18]:\n\n\n#import regular expressions to clean up the text\nimport re\ndef preprocessor(text):\n text = re.sub('<[^>]*>', '', text) # remove all html markup\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text) # findall the emoticons\n \n # remove the non-word chars '[\\W]+'\n # append the emoticons to end \n #convert all to lowercase\n # remove nose char for consistency\n text = (re.sub('[\\W]+', ' ', text.lower()) +\n ' '.join(emoticons).replace('-', '')) \n return text\n\n\n# In[19]:\n\n\npreprocessor(df.loc[3635, 'review'])#[-50:]\n\n\n# ## Apply the clean data preprocessor to the text\n\n# In[20]:\n\n\npreprocessor(\"This :) is :( a test :-)!\")\n\n\n# In[21]:\n\n\n# apply the 
preprocessor to the entire dataframe (i.e. column review)\ndf['review'] = df['review'].apply(preprocessor)\n\n\n# ## Tokenise - break text into tokens\n\n# In[22]:\n\n\ndef tokenizer(text):\n return text.split()\n\n\n# In[23]:\n\n\nprint(tokenizer(\"Tokenise this sentence into its individual words\"))\n\n\n# In[24]:\n\n\nfrom nltk.corpus import stopwords \n\nnltk.download('stopwords')\n\n\n# create a method to accept a piece of tokenised text and return text back without the stopped words\n\n# In[25]:\n\n\nstop = set(stopwords.words('english'))\ndef stop_removal(text):\n return [w for w in text if not w in stop]\n\n\n# In[26]:\n\n\ntext = \"This is a sample sentence, demonstrating the removal of stop words.\"\nstopped_text = stop_removal(text.split())\nprint(stopped_text) \n\n\n# ## Stemming - Processing tokens into their root form\n\n# In[27]:\n\n\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\n\n#See which languages are supported.\nprint(\" \".join(SnowballStemmer.languages))\n\n\n# In[28]:\n\n\n#get the english stemmer\nstemmer = SnowballStemmer(\"english\")\n\n#stem a word\nprint(stemmer.stem(\"running\"))\n\n\n# In[29]:\n\n\n#Decide not to stem stopwords with ignore_stopwords\nstemmer2 = SnowballStemmer(\"english\", ignore_stopwords=True)\n\n#compare the two versions of the stemmer\nprint(stemmer.stem(\"having\"))\n\nprint(stemmer2.stem(\"having\"))\n\n\n# In[30]:\n\n\n#The 'english' stemmer is better than the original 'porter' stemmer.\nprint(SnowballStemmer(\"english\").stem(\"generously\"))\n\nprint(SnowballStemmer(\"porter\").stem(\"generously\"))\n\n\n# # Tokenise + Stemming \n\n# In[31]:\n\n\ndef tokenizer_stemmer(text):\n return [stemmer.stem(word) for word in tokenizer(text)]#text.split()]\n\n\n# In[32]:\n\n\ntokenizer('runners like running and thus they run')\n\n\n# In[33]:\n\n\ntokenizer_stemmer('runners like running and thus they run')\n\n\n# You can clearly see from the code above the effect of the stemmer on the tokens\n\n# In[34]:\n\n\nfrom nltk.corpus import stopwords\n\nstop = stopwords.words('english')\n[w for w in tokenizer_stemmer('A runner likes running and runs a lot')[-8:]\nif w.lower() not in stop]\n\n\n# # Training a model for sentiment classification\n\n# In[35]:\n\n\nX_train = df.loc[:25000, 'review'].values\ny_train = df.loc[:25000, 'sentiment'].values\nX_test = df.loc[25000:, 'review'].values\ny_test = df.loc[25000:, 'sentiment'].values\n\n### smaller sample\nX_train = df.loc[:2500, 'review'].values\ny_train = df.loc[:2500, 'sentiment'].values\n\n\n# In[36]:\n\n\nparam_grid = [{'vect__ngram_range': [(1, 1)], #can also extract 2-grams of words in addition to the 1-grams (individual words)\n 'vect__stop_words': [stop, None], # use the stop dictionary of stopwords or not\n 'vect__tokenizer': [tokenizer_stemmer]}, # use a tokeniser and the stemmer \n ]\n\n\n# In[38]:\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = [{'vect__ngram_range': [(1, 5)], #can also extract 2-grams of words in addition to the 1-grams (individual words)\n 'vect__stop_words': [stop, None], # use the stop dictionary of stopwords or not\n 'vect__tokenizer': [tokenizer]}, # use a tokeniser and the stemmer \n ]\n\ntfidf = TfidfVectorizer(strip_accents=None,\n lowercase=False,\n 
preprocessor=None)\n\n\nmnb_tfidf = Pipeline([('vect', tfidf),\n ('clf', KNeighborsClassifier(n_neighbors=5))])\n\n\n \ngs_mnb_tfidf = GridSearchCV(mnb_tfidf, param_grid,\n scoring='accuracy',\n cv=5,\n verbose=1,\n n_jobs=1) \n\ngs_mnb_tfidf.fit(X_train, y_train)\nprint('Best parameter set: %s ' % gs_mnb_tfidf.best_params_)\nprint('CV Accuracy: %.3f' % gs_mnb_tfidf.best_score_)\nclf = gs_mnb_tfidf.best_estimator_\nprint('Test Accuracy: %.3f' % clf.score(X_test, y_test))\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "KNN_IMDB.py", "file_name": "KNN_IMDB.py", "file_ext": "py", "file_size_in_byte": 7556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 111, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 145, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 167, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 168, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 174, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 220, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 228, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 228, "usage_type": "name"}, {"api_name": "nltk.stem.snowball.SnowballStemmer.languages", "line_number": 250, "usage_type": "attribute"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 250, "usage_type": "name"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 257, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 267, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 279, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 281, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 312, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 312, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 356, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 361, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 362, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 366, "usage_type": "call"}]}
+{"seq_id": "452999208", "text": "# Main file for Mynes Game project\n# Controls the game state based on player inputs and updates MynesBoard\n\nfrom MynesBoard import *\n# from MyneGUI import *\nimport pygame\n\nWHITE = (255, 0, 0)\nBLACK = (255, 255, 255)\nICON_SIZE = 24\n\n\nclass Mynes:\n \"\"\"\n This class is the main running Mynes game.\n\n === Attributes ===\n screen: uses the screen from MynesGUI\n board: uses the board from MyneBoard\n flag_count: Keeps track of how many flags the player has available to place\n\n \"\"\"\n\n # === Private Attributes ===\n # _running: pygame attribute that runs or stops the game\n # _flags_placed: Keeps track of how many flag objects are on the board\n\n game_board: MynesBoard\n # GUI: MynesGUI\n flag_count: int\n _running: bool\n\n # ---------Mynes methods--------- #\n def __init__(self):\n \"\"\"\n Create a Mynes game that has a list of players (mines, numbers, empty spaces, etc)\n \"\"\"\n self._running = False\n self._lost = False\n self.game_board = MynesBoard()\n # self.GUI = MynesGUI()\n self.screen = None\n self.flag_count = self.game_board.mine_count\n # Windows size in pixels\n self.width, self.height = self.game_board.width * ICON_SIZE, self.game_board.height * ICON_SIZE\n\n def get_number(self, x, y) -> int:\n \"\"\"\n :param x: x-coordinate on board\n :param y: y-coordinate on board\n :return: Number at (x,y) on the board.\n \"\"\"\n return self.board[x][y].number\n\n def get_flag(self, x, y) -> bool:\n \"\"\"\n :param x: x-coordinate on board\n :param y: y-coordinate on board\n :return: If a flag is placed at (x,y) on the board.\n \"\"\"\n return self.board[x][y].flagged\n\n def mynes_won(self) -> bool:\n \"\"\"\n :return: If player has won the game by flagging all mines.\n \"\"\"\n if self.flag_count > 0:\n return False\n else:\n x = 0\n y = 0\n for x in range(len(self.width)):\n for y in range(len(self.height)):\n # Spot has mine but no flag\n if (self.game_board.board[x][y].value == -1) and (self.game_board.board[x][y].flag == False):\n return False\n\n return True\n\n def mynes_lost(self) -> None:\n \"\"\"\n Mark the game as 'lost' if the player clicks a mine.\n \"\"\"\n # Generate a fail message and text box to be printed in screen center\n font = pygame.font.Font('freesansbold.ttf', 16)\n fail_text = font.render(\"FAIL, CLICK TO EXIT\", True, WHITE, BLACK)\n fail_box = fail_text.get_rect()\n fail_box.center = (self.width//2, self.height//2)\n\n self.screen.blit(fail_text, fail_box)\n pygame.display.flip()\n\n # End game\n self._lost = True\n\n # ---------Pygame Methods---------- #\n def on_init(self) -> None:\n \"\"\"\n Initialize the game's screen, and begin running the game.\n \"\"\"\n\n pygame.init()\n self.screen = pygame.display.set_mode \\\n ((self.width, self.height), pygame.HWSURFACE | pygame.DOUBLEBUF)\n self._running = True\n\n def on_event(self, event: pygame.event) -> None:\n \"\"\"\n React to the given as appropriate. 
Either the player makes a move or quits the game.\n \"\"\"\n if event.type == pygame.QUIT:\n self._running = False\n # player clicks when game is lost\n elif event.type == pygame.MOUSEBUTTONUP and self._lost:\n self._running = False\n # player clicks when game is running\n elif event.type == pygame.MOUSEBUTTONUP:\n (x, y) = pygame.mouse.get_pos()\n # Loop through MyneSquare objects\n for board_y in range(self.game_board.height):\n for board_x in range(self.game_board.width):\n square = self.game_board.board[board_x][board_y]\n # Square that mouse is over\n if square.hitbox.collidepoint(x, y):\n # 1 for left click, 3 for right click\n if event.button == 1:\n if square.value == -1:\n self.mynes_lost()\n # else:\n # self.board.clear_spaces\n # Right click for Flagging\n elif event.button == 3:\n # Remove Flag\n if square.flag:\n square.flag = False\n self.flag_count += 1\n square.icon = pygame.image.load(\"temp_empty.png\")\n # Don't Place Flag\n elif (not square.flag) and self.flag_count == 0:\n pass\n # Place Flag\n else:\n square.flag = True\n self.flag_count -= 1\n square.icon = pygame.image.load(\"temp_flag.png\")\n\n def quit(self) -> None:\n \"\"\"\n Clean up and close the game.\n \"\"\"\n\n pygame.quit()\n\n def render(self) -> None:\n \"\"\"\n Call MynesGUI to render the pygame screen.\n \"\"\"\n # Stop accepting player inputs when game is lost\n if not self._lost:\n font = pygame.font.Font('freesansbold.ttf', 12)\n for x in range(self.game_board.width):\n for y in range(self.game_board.height):\n # number = font.render(str(self.game_board.board[x][y].value), True, WHITE, BLACK)\n box = pygame.Rect(x * ICON_SIZE, y * ICON_SIZE, ICON_SIZE, ICON_SIZE)\n # box = self.game_board.board[x][y].hitbox\n self.screen.blit(self.game_board.board[x][y].icon, box)\n pygame.display.update()\n\n def execute(self) -> None:\n \"\"\"\n Run the game until the game ends.\n \"\"\"\n print(\"running\")\n self.on_init()\n print(\"running\")\n self.screen.fill(WHITE)\n while self._running:\n\n for event in pygame.event.get():\n self.on_event(event)\n self.render()\n\n self.quit()\n", "sub_path": "Mynes.py", "file_name": "Mynes.py", "file_ext": "py", "file_size_in_byte": 6359, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pygame.font.Font", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.DOUBLEBUF", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.event", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 118, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 137, "usage_type": 
"call"}, {"api_name": "pygame.image", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 179, "usage_type": "attribute"}]}
+{"seq_id": "31503839", "text": "# coding: utf-8\n\n\"\"\"\n MessageMedia REST API\n\n Australia's Leading Messaging Solutions for Business and Enterprise.\n\n OpenAPI spec version: 1.0.0\n Contact: support@messagemedia.com\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass DeliveryOptionsBodyInner(object):\n \"\"\"\n Do not edit the class manually.\n \"\"\"\n def __init__(self, delivery_type=None, delivery_addresses=None, delivery_format=None):\n \"\"\"\n DeliveryOptionsBodyInner - a model\n\n :param dict types: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.types = {\n 'delivery_type': 'str',\n 'delivery_addresses': 'list[str]',\n 'delivery_format': 'str'\n }\n\n self.attribute_map = {\n 'delivery_type': 'delivery_type',\n 'delivery_addresses': 'delivery_addresses',\n 'delivery_format': 'delivery_format'\n }\n\n self._delivery_type = delivery_type\n self._delivery_addresses = delivery_addresses\n self._delivery_format = delivery_format\n\n @property\n def delivery_type(self):\n \"\"\"\n Gets the delivery_type of this DeliveryOptionsBodyInner.\n How to deliver the report.\n\n :return: The delivery_type of this DeliveryOptionsBodyInner.\n :rtype: str\n \"\"\"\n return self._delivery_type\n\n @delivery_type.setter\n def delivery_type(self, delivery_type):\n \"\"\"\n Sets the delivery_type of this DeliveryOptionsBodyInner.\n How to deliver the report.\n\n :param delivery_type: The delivery_type of this DeliveryOptionsBodyInner.\n :type: str\n \"\"\"\n allowed_values = [\"EMAIL\"]\n if delivery_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `delivery_type` ({0}), must be one of {1}\"\n .format(delivery_type, allowed_values)\n )\n\n self._delivery_type = delivery_type\n\n @property\n def delivery_addresses(self):\n \"\"\"\n Gets the delivery_addresses of this DeliveryOptionsBodyInner.\n A list of email addresses to use as the recipient of the email. Only works for EMAIL delivery type\n\n :return: The delivery_addresses of this DeliveryOptionsBodyInner.\n :rtype: list[str]\n \"\"\"\n return self._delivery_addresses\n\n @delivery_addresses.setter\n def delivery_addresses(self, delivery_addresses):\n \"\"\"\n Sets the delivery_addresses of this DeliveryOptionsBodyInner.\n A list of email addresses to use as the recipient of the email. 
Only works for EMAIL delivery type\n\n :param delivery_addresses: The delivery_addresses of this DeliveryOptionsBodyInner.\n :type: list[str]\n \"\"\"\n\n self._delivery_addresses = delivery_addresses\n\n @property\n def delivery_format(self):\n \"\"\"\n Gets the delivery_format of this DeliveryOptionsBodyInner.\n Format of the report.\n\n :return: The delivery_format of this DeliveryOptionsBodyInner.\n :rtype: str\n \"\"\"\n return self._delivery_format\n\n @delivery_format.setter\n def delivery_format(self, delivery_format):\n \"\"\"\n Sets the delivery_format of this DeliveryOptionsBodyInner.\n Format of the report.\n\n :param delivery_format: The delivery_format of this DeliveryOptionsBodyInner.\n :type: str\n \"\"\"\n allowed_values = [\"CSV\"]\n if delivery_format not in allowed_values:\n raise ValueError(\n \"Invalid value for `delivery_format` ({0}), must be one of {1}\"\n .format(delivery_format, allowed_values)\n )\n\n self._delivery_format = delivery_format\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n", "sub_path": "messagemedia_rest_api/models/delivery_options_body_inner.py", "file_name": "delivery_options_body_inner.py", "file_ext": "py", "file_size_in_byte": 5851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "six.iteritems", "line_number": 145, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 169, "usage_type": "call"}]}
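The `delivery_type`/`delivery_format` setters in the record above use the usual generated-client pattern of validating against a fixed enum before assignment. A minimal self-contained sketch of that pattern (class and field names here are illustrative, not part of the MessageMedia SDK):

```python
class ReportDelivery:
    """Toy model using the validated-setter pattern from the generated class."""
    ALLOWED_TYPES = ["EMAIL"]  # mirrors allowed_values in the setter above

    def __init__(self, delivery_type=None):
        self._delivery_type = None
        if delivery_type is not None:
            self.delivery_type = delivery_type  # route through the setter

    @property
    def delivery_type(self):
        return self._delivery_type

    @delivery_type.setter
    def delivery_type(self, value):
        if value not in self.ALLOWED_TYPES:
            raise ValueError(
                "Invalid value for `delivery_type` ({0}), must be one of {1}"
                .format(value, self.ALLOWED_TYPES))
        self._delivery_type = value


d = ReportDelivery("EMAIL")      # accepted
# ReportDelivery("SMS")          # would raise ValueError
```

Unlike the generated `__init__`, which assigns the private attributes directly, this sketch routes construction through the setter so invalid values are rejected at creation time as well.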
+{"seq_id": "246664637", "text": "#!/usr/bin/python3\n\"\"\"\nModule related with City class\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\n\n\n@app_views.route(\"states//cities\", methods=['GET'],\n strict_slashes=False)\ndef get_them_all_city(state_id):\n \"\"\"Retrive all cities from a given state\"\"\"\n city_state = storage.get(State, state_id)\n if city_state is None:\n abort(404)\n ret_list = []\n for city in city_state.cities:\n ret_list.append(city.to_dict())\n return jsonify(ret_list)\n\n\n@app_views.route(\"/cities/\", methods=['GET'],\n strict_slashes=False)\ndef get_city(city_id):\n \"\"\"Retrive object city from their id\"\"\"\n obj_city = storage.get(City, city_id)\n if obj_city is None:\n abort(404)\n return (jsonify(obj_city.to_dict()))\n\n\n@app_views.route(\"/cities/\", methods=['DELETE'],\n strict_slashes=False)\ndef delete_city(city_id):\n \"\"\"Delete an instance of a city\"\"\"\n del_obj = storage.get(City, city_id)\n if del_obj is not None:\n storage.delete(del_obj)\n storage.save()\n return jsonify({})\n else:\n abort(404)\n\n\n@app_views.route(\"/states//cities\", methods=['POST'],\n strict_slashes=False)\ndef post_city(state_id):\n \"\"\"Add an instance of a city\"\"\"\n if storage.get(State, state_id) is None:\n abort(404)\n if request.is_json:\n data = request.get_json()\n if \"name\" not in data:\n abort(400, \"Missing name\")\n new_city = City()\n setattr(new_city, \"state_id\", state_id)\n for k, v in data.items():\n setattr(new_city, k, v)\n new_city.save()\n return jsonify(new_city.to_dict()), 201\n else:\n abort(400, \"Not a JSON\")\n\n\n@app_views.route(\"/cities/\", methods=['PUT'],\n strict_slashes=False)\ndef put_city(city_id):\n \"\"\"Update an instance of a city\"\"\"\n obj = storage.get(City, city_id)\n if obj is None:\n abort(404)\n update = request.get_json()\n if update is not None:\n for k, v in update.items():\n if k not in [\"id\", \"state_id\", \"created_at\", \"updated_at\"]:\n setattr(obj, k, v)\n storage.save()\n return jsonify(obj.to_dict())\n else:\n abort(400, \"Not a JSON\")\n", "sub_path": "api/v1/views/cities.py", "file_name": "cities.py", "file_ext": "py", "file_size_in_byte": 2412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "models.storage.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 23, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 13, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 13, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 30, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 26, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 26, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 
40, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 40, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 40, "usage_type": "name"}, {"api_name": "models.storage.delete", "line_number": 42, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 42, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 43, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 46, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 36, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 36, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.state.State", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 58, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 66, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 49, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 49, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 73, "usage_type": "call"}, {"api_name": "models.city.City", "line_number": 73, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 81, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 84, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 69, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 69, "usage_type": "name"}]}
+{"seq_id": "177974601", "text": "from pycram.process_module import ProcessModule\nfrom pycram.bullet_world import BulletWorld\nfrom pycram.helper import transform\nimport pycram.bullet_world_reasoning as btr\nimport pybullet as p\nimport numpy as np\nimport time\n\nright_arm_park = {\"r_shoulder_pan_joint\" : -1.712,\n \"r_shoulder_lift_joint\" : -0.256,\n \"r_upper_arm_roll_joint\" : -1.463,\n \"r_elbow_flex_joint\" : -2.12,\n \"r_forearm_roll_joint\" : 1.766,\n \"r_wrist_flex_joint\" : -0.07,\n \"r_wrist_roll_joint\" : 0.051}\nleft_arm_park = {\"l_shoulder_pan_joint\" : 1.712,\n \"l_shoulder_lift_joint\" : -0.264,\n \"l_upper_arm_roll_joint\" : 1.38,\n \"l_elbow_flex_joint\" : -2.12,\n \"l_forearm_roll_joint\" : 16.996,\n \"l_wrist_flex_joint\" : -0.073}\nik_joints = [\"fl_caster_rotation_joint\", \"fl_caster_l_wheel_joint\", \"fl_caster_r_wheel_joint\",\n \"fr_caster_rotation_joint\", \"fr_caster_l_wheel_joint\", \"fr_caster_r_wheel_joint\",\n \"bl_caster_rotation_joint\", \"bl_caster_l_wheel_joint\", \"bl_caster_r_wheel_joint\",\n \"br_caster_rotation_joint\", \"br_caster_l_wheel_joint\", \"br_caster_r_wheel_joint\",\n \"head_pan_joint\", \"head_tilt_joint\", \"laser_tilt_mount_joint\", \"r_shoulder_pan_joint\",\n \"r_shoulder_lift_joint\", \"r_upper_arm_roll_joint\", \"r_elbow_flex_joint\",\n \"r_forearm_roll_joint\", \"r_wrist_flex_joint\", \"r_wrist_roll_joint\",\n \"r_gripper_motor_slider_joint\", \"r_gripper_motor_screw_joint\",\n \"r_gripper_l_finger_joint\", \"r_gripper_l_finger_tip_joint\",\n \"r_gripper_r_finger_joint\", \"r_gripper_r_finger_tip_joint\",\n \"r_gripper_joint\", \"l_shoulder_pan_joint\", \"l_shoulder_lift_joint\",\n \"l_upper_arm_roll_joint\", \"l_elbow_flex_joint\", \"l_forearm_roll_joint\",\n \"l_wrist_flex_joint\", \"l_wrist_roll_joint\", \"l_gripper_motor_slider_joint\",\n \"l_gripper_motor_screw_joint\", \"l_gripper_l_finger_joint\",\n \"l_gripper_l_finger_tip_joint\", \"l_gripper_r_finger_joint\",\n \"l_gripper_r_finger_tip_joint\", \"l_gripper_joint\", \"torso_lift_motor_screw_joint\"]\n\n\ndef _apply_ik(robot, joint_poses):\n \"\"\"\n Applies a list of joint poses calculated by an inverse kinematics solver to a robot\n :param robot: The robot the joint poses should be applied on\n :param joint_poses: The joint poses to be applied\n :return: None\n \"\"\"\n for i in range(0, len(ik_joints)):\n robot.set_joint_state(ik_joints[i], joint_poses[i])\n\n\ndef _park_arms(arm):\n \"\"\"\n Defines the joint poses for the parking positions of the arms of the PR2 and applies them to the, in the BulletWorld\n defined robot.\n :return:\n \"\"\"\n #joint_poses = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.9, -0.1, 1.6, 1.7,\n # 0.087, 1.2, -1.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.9, -0.1, 1.6,\n # -1.7, -0.08, -1.2, 1.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n robot = BulletWorld.robot\n if arm == \"right\":\n for joint, pose in right_arm_park.items():\n robot.set_joint_state(joint, pose)\n if arm == \"left\":\n for joint, pose in left_arm_park.items():\n robot.set_joint_state(joint, pose)\n\n\nclass Pr2Navigation(ProcessModule):\n \"\"\"\n The process module to move the robot from one position to another.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == 'navigate':\n robot = BulletWorld.robot\n robot.set_position_and_orientation(solution['target'], solution['orientation'])\n\nclass Pr2PickUp(ProcessModule):\n \"\"\"\n This process module is for picking up a given object.\n The object 
has to be reachable for this process module to succeed.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == 'pick':\n obj = solution['object']\n robot = BulletWorld.robot\n target = obj.prop_value(\"pose\")\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(solution['gripper']), target,\n maxNumIterations=100)\n _apply_ik(robot, inv)\n robot.attach(obj.prop_value(\"bullet_obj\"), solution['gripper'])\n time.sleep(0.5)\n\n\nclass Pr2Place(ProcessModule):\n \"\"\"\n This process module places an object at the given position in world coordinate frame.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == 'place':\n obj = solution['object']\n robot = BulletWorld.robot\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(solution['gripper']), solution['target'],\n maxNumIterations=100)\n _apply_ik(robot, inv)\n robot.detach(obj.prop_value(\"bullet_obj\"))\n time.sleep(0.5)\n\nclass PR2EnvironmentManipulation(ProcessModule):\n \"\"\"\n This process module responsible for opening and closing container to access the objects inside. This works by firstly moving\n the end effector to the handle of the container. Next, the end effector is moved the respective distance to the back.\n This provides the illusion the robot would open the drawer by himself.\n Then the drawer will be opened by setting the joint pose of the drawer joint.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n kitchen = solution['part-of']\n if type(kitchen) is str:\n kitchen = BulletWorld.current_bullet_world.get_objects_by_name(kitchen)[0]\n\n if solution['cmd'] == 'open-prismatic' or solution['cmd'] == 'close-prismatic':\n # kitchen = solution['part-of']\n robot = BulletWorld.robot\n gripper = solution['gripper']\n container_handle = solution['handle']\n container_joint = solution['joint']\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper),\n kitchen.get_link_position(container_handle))\n _apply_ik(robot, inv)\n time.sleep(0.2)\n handle_pose = kitchen.get_link_position(container_handle)\n if solution['cmd'] == 'open-prismatic':\n distance = solution['distance']\n print(\"Process module distance: \" + str(distance))\n new_pose = [handle_pose[0] - distance, handle_pose[1], handle_pose[2]]\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), new_pose)\n _apply_ik(robot, inv)\n kitchen.set_joint_state(container_joint, distance)\n elif solution['cmd'] == 'close-prismatic':\n distance = kitchen.get_joint_state(container_joint)\n new_pose = [handle_pose[0] + distance, handle_pose[1], handle_pose[2]]\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), new_pose)\n _apply_ik(robot, inv)\n kitchen.set_joint_state(container_joint, 0.0)\n time.sleep(0.2)\n\n if solution['cmd'] == \"open-rotational\":\n # kitchen = solution['part-of']\n robot = BulletWorld.robot\n gripper = solution['gripper']\n container_handle = solution['handle']\n container_joint = solution['joint']\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper),\n kitchen.get_link_position(container_handle))\n _apply_ik(robot, inv)\n time.sleep(0.2)\n distance = solution['distance']\n kitchen.set_joint_state(container_joint, distance)\n handle_pose = kitchen.get_link_position(container_handle)\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), handle_pose)\n _apply_ik(robot, inv)\n\n if solution['cmd'] == \"close-rotational\":\n # kitchen = 
solution['part-of']\n robot = BulletWorld.robot\n gripper = solution['gripper']\n container_handle = solution['handle']\n container_joint = solution['joint']\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper),\n kitchen.get_link_position(container_handle))\n _apply_ik(robot, inv)\n time.sleep(0.2)\n distance = 0.0\n kitchen.set_joint_state(container_joint, distance)\n handle_pose = kitchen.get_link_position(container_handle)\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), handle_pose)\n _apply_ik(robot, inv)\n time.sleep(0.2)\n\nclass Pr2ParkArms(ProcessModule):\n \"\"\"\n This process module is for moving the arms in a parking position.\n It is currently not used.\n \"\"\"\n def _execute(self, desig):\n solutions = desig.reference()\n if solutions['cmd'] == 'park':\n _park_arms()\n\n\nclass Pr2MoveHead(ProcessModule):\n \"\"\"\n This process module moves the head to look at a specific point in the world coordinate frame.\n This point can either be a position or an object.\n \"\"\"\n def _execute(self, desig):\n solutions = desig.reference()\n if solutions['cmd'] == 'looking':\n target = solutions['target']\n robot = BulletWorld.robot\n pose_in_pan = transform(target, robot.get_link_position(\"head_pan_link\"))\n pose_in_tilt = transform(target, robot.get_link_position(\"head_tilt_link\"))\n\n new_pan = np.arctan([pose_in_pan[1], pose_in_pan[0]])\n new_tilt = np.arctan([-pose_in_tilt[2], pose_in_tilt[0]**2 + pose_in_tilt[1]**2])\n\n robot.set_joint_state(\"head_pan_joint\", new_pan[0])\n robot.set_joint_state(\"head_tilt_joint\", new_tilt[0])\n\n\nclass Pr2MoveGripper(ProcessModule):\n \"\"\"\n This process module controls the gripper of the robot. They can either be opened or closed.\n Furthermore, it can only moved one gripper at a time.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"move-gripper\":\n robot = BulletWorld.robot\n gripper = solution['gripper']\n motion = solution['motion']\n robot.set_joint_state(\"r_gripper_l_finger_joint\" if gripper == 'right' else \"l_gripper_l_finger_joint\",\n 0 if motion == \"close\" else 0.548)\n robot.set_joint_state(\"r_gripper_r_finger_joint\" if gripper == 'right' else \"l_gripper_r_finger_joint\",\n 0 if motion == \"close\" else 0.548)\n time.sleep(0.5)\n\n\nclass Pr2Detecting(ProcessModule):\n \"\"\"\n This process module tries to detect an object with the given type. 
To be detected the object has to be in\n the field of view of the robot.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"detecting\":\n robot = BulletWorld.robot\n object_type = solution['object']\n cam_frame_name = solution['cam_frame']\n front_facing_axis = solution['front_facing_axis']\n\n objects = BulletWorld.current_bullet_world.get_objects_by_type(object_type)\n for obj in objects:\n if btr.visible(obj, robot.get_link_position_and_orientation(cam_frame_name), front_facing_axis):\n return obj\n\n\nclass Pr2MoveTCP(ProcessModule):\n \"\"\"\n This process module moves the tool center point of either the right or the left arm.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"move-tcp\":\n target = solution['target']\n gripper = solution['gripper']\n robot = BulletWorld.robot\n inv = p.calculateInverseKinematics(robot.id, robot.get_link_id(gripper), target)\n _apply_ik(robot, inv)\n time.sleep(0.5)\n\n\nclass Pr2MoveJoints(ProcessModule):\n \"\"\"\n This process modules moves the joints of either the right or the left arm. The joint states can be given as\n list that should be applied or a pre-defined position can be used, such as \"parking\"\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"move-joints\":\n robot = BulletWorld.robot\n right_arm_poses = solution['right-poses']\n left_arm_poses = solution['left-poses']\n if type(right_arm_poses) == dict:\n for joint, pose in right_arm_poses.items():\n robot.set_joint_state(joint, pose)\n elif type(right_arm_poses) == str and right_arm_poses == \"park\":\n _park_arms(\"right\")\n\n if type(left_arm_poses) == dict:\n for joint, pose in left_arm_poses.items():\n robot.set_joint_state(joint, pose)\n elif type(left_arm_poses) == str and left_arm_poses == \"park\":\n _park_arms(\"left\")\n\n time.sleep(0.5)\n\n\nclass Pr2WorldStateDetecting(ProcessModule):\n \"\"\"\n This process module detectes an object even if it is not in the field of view of the robot.\n \"\"\"\n def _execute(self, desig):\n solution = desig.reference()\n if solution['cmd'] == \"world-state-detecting\":\n obj_type = solution['object']\n return list(filter(lambda obj: obj.type == obj_type, BulletWorld.current_bullet_world.objects))[0]\n\n\npr2_navigation = Pr2Navigation()\npr2_pick_up = Pr2PickUp()\npr2_place = Pr2Place()\npr2_park_arms = Pr2ParkArms()\npr2_move_head = Pr2MoveHead()\npr2_move_gripper = Pr2MoveGripper()\npr2_detecting = Pr2Detecting()\npr2_move_tcp = Pr2MoveTCP()\npr2_move_joints = Pr2MoveJoints()\npr2_world_state_detecting = Pr2WorldStateDetecting()\npr2_environment_manipulation = PR2EnvironmentManipulation()\n\n\ndef available_process_modules(desig):\n \"\"\"\n This method chooses the right process module for the given designator and returns it.\n :param desig: The designator for which a process module should be choosen.\n :return: The choosen process module\n \"\"\"\n if desig.check_constraints([('type', 'moving')]):\n return pr2_navigation\n\n if desig.check_constraints([('type', 'pick-up')]):\n return pr2_pick_up\n\n if desig.check_constraints([('type', 'place')]):\n return pr2_place\n\n if desig.check_constraints([('type', 'park-arms')]):\n return pr2_park_arms\n\n if desig.check_constraints([('type', 'looking')]):\n return pr2_move_head\n\n if desig.check_constraints([('type', 'opening-gripper')]):\n return pr2_move_gripper\n\n if desig.check_constraints([('type', 'closing-gripper')]):\n return pr2_move_gripper\n\n 
if desig.check_constraints([('type', 'detecting')]):\n return pr2_detecting\n\n if desig.check_constraints([('type', 'move-tcp')]):\n return pr2_move_tcp\n\n if desig.check_constraints([('type', 'move-arm-joints')]):\n return pr2_move_joints\n\n if desig.check_constraints([('type', 'world-state-detecting')]):\n return pr2_world_state_detecting\n\n if desig.check_constraints([('type', 'opening-prismatic')]):\n return pr2_environment_manipulation\n\n if desig.check_constraints([('type', 'closing-prismatic')]):\n return pr2_environment_manipulation\n\n if desig.check_constraints([('type', 'opening-rotational')]):\n return pr2_environment_manipulation\n\n if desig.check_constraints([('type', 'closing-rotational')]):\n return pr2_environment_manipulation\n\n\nProcessModule.resolvers.append(available_process_modules)\n", "sub_path": "demos/pycram_tasktree_demo/scripts/process_modules.py", "file_name": "process_modules.py", "file_ext": "py", "file_size_in_byte": 15895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 61, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 70, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 77, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 80, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 89, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 98, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 106, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 107, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 113, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world.get_objects_by_name", "line_number": 124, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 124, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 128, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 132, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 135, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 141, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 147, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 150, "usage_type": "call"}, {"api_name": 
"pycram.bullet_world.BulletWorld.robot", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 154, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 158, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 161, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 165, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 170, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 174, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 177, "usage_type": "call"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 181, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 183, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 185, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 196, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 205, "usage_type": "name"}, {"api_name": "pycram.helper.transform", "line_number": 206, "usage_type": "call"}, {"api_name": "pycram.helper.transform", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 210, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 216, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 224, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 231, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 234, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 242, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world.get_objects_by_type", "line_number": 247, "usage_type": "call"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 247, "usage_type": "name"}, {"api_name": "pycram.bullet_world_reasoning.visible", "line_number": 249, "usage_type": "call"}, {"api_name": "pycram.bullet_world_reasoning", "line_number": 249, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 253, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 262, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 262, "usage_type": "name"}, {"api_name": "pybullet.calculateInverseKinematics", "line_number": 263, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 265, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 268, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.robot", "line_number": 276, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 276, 
"usage_type": "name"}, {"api_name": "time.sleep", "line_number": 291, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 294, "usage_type": "name"}, {"api_name": "pycram.bullet_world.BulletWorld.current_bullet_world", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pycram.bullet_world.BulletWorld", "line_number": 302, "usage_type": "name"}, {"api_name": "pycram.process_module.ProcessModule.resolvers.append", "line_number": 370, "usage_type": "call"}, {"api_name": "pycram.process_module.ProcessModule.resolvers", "line_number": 370, "usage_type": "attribute"}, {"api_name": "pycram.process_module.ProcessModule", "line_number": 370, "usage_type": "name"}]}
+{"seq_id": "326359986", "text": "# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Define a template for a stateful process that produces metrics.\"\"\"\n\nimport attr\n\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.templates import iterative_process\n\n\n@attr.s(frozen=True, eq=False, slots=True)\nclass MeasuredProcessOutput:\n \"\"\"A structure containing the output of a `MeasuredProcess.next` computation.\n\n Attributes:\n state: A structure that will be passed to invocation of\n `MeasuredProcess.next`. Not intended for inspection externally, contains\n implementation details of the process.\n result: The result of the process given the current input and state. Using\n the rules of composition, either passed to input arguments of chained a\n `MeasuredProcess`, or concatenated with outputs of parallel\n `MeasuredProcess`es.\n measurements: Metrics derived from the computation of `result`. Intended for\n surfacing values to track the progress of a process that are not sent to\n chained `MeasuredProcess`es.\n \"\"\"\n state = attr.ib()\n result = attr.ib()\n measurements = attr.ib()\n\n# The type signature of the result of MeasuredProcess must be a named tuple\n# with the following names in the same order.\n_RESULT_FIELD_NAMES = [f.name for f in attr.fields(MeasuredProcessOutput)]\n\n\n# TODO(b/150384321): add method for performing the composition; current proposal\n# include a stadnalone `measure_process.compose(F, G)`, or implementing\n# `G.__call__(F)` to return a new MeasuredProcess.\nclass MeasuredProcess(iterative_process.IterativeProcess):\n \"\"\"A `tff.templates.IterativeProcess` with a specific output signature.\n\n A `tff.templates.MeasuredProcess` is a `tff.templates.IterativeProcess` that\n formalizes the output signature of the `next` property to be a named\n three-tuple ``. This definition enables\n `tff.templates.MeasuredProcess` to be composed following the rules below,\n something that wasn't possible with the more generic, less defined\n `tff.templates.IterativeProcess`.\n\n *Rules of Composition*\n Given two `MeasuredProcess` _F(x)_ and _G(y)_, a new composition _C_ is\n also a `MeasuredProcess` where:\n - `C.state` is the concatenation ``.\n - `C.result` is the result of _G_ applied to the result of\n _F_: `G(G.state, F(F.state, x).result).result`.\n - `C.measurements` is the concatenation ``.\n\n The resulting composition _C_ would have the following type signatures:\n initialize: `( -> )`\n next: `(<, F.input> -> ,\n result=G.result, measurements=)`\n \"\"\"\n\n def __init__(self, initialize_fn, next_fn):\n \"\"\"Creates a `tff.templates.MeasuredProcess`.\n\n Args:\n initialize_fn: A no-arg `tff.Computation` that creates the initial state\n of the measured process.\n next_fn: A `tff.Computation` that defines an iterated function. 
If\n `initialize_fn` returns a type `S`, then `next_fn` must return a\n `MeasuredProcessOutput` where the `state` attribute matches the type\n `S`, and accept either a single argument of type `S` or multiple\n arguments where the first argument must be of type `S`.\n\n Raises:\n TypeError: `initialize_fn` and `next_fn` are not compatible function\n types, or `next_fn` does not return a `MeasuredProcessOutput`.\n \"\"\"\n super().__init__(initialize_fn, next_fn)\n next_result_type = next_fn.type_signature.result\n if not (isinstance(next_result_type, computation_types.StructWithPythonType)\n and next_result_type.python_container is MeasuredProcessOutput):\n raise TypeError(\n 'MeasuredProcess must return a MeasuredProcessOutput. Received a '\n '({t}): {s}'.format(\n t=type(next_fn.type_signature.result),\n s=next_fn.type_signature.result))\n\n @property\n def next(self):\n \"\"\"A `tff.Computation` that runs one iteration of the process.\n\n Its first argument should always be the current state (originally produced\n by `tff.templates.MeasuredProcess.initialize`), and the return type must be\n a `tff.templates.MeasuredProcessOutput`.\n\n Returns:\n A `tff.Computation`.\n \"\"\"\n return self._next_fn\n", "sub_path": "tensorflow_federated/python/core/templates/measured_process.py", "file_name": "measured_process.py", "file_ext": "py", "file_size_in_byte": 4912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "attr.ib", "line_number": 38, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 39, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 40, "usage_type": "call"}, {"api_name": "attr.s", "line_number": 22, "usage_type": "call"}, {"api_name": "attr.fields", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow_federated.python.core.templates.iterative_process.IterativeProcess", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.python.core.templates.iterative_process", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow_federated.python.core.api.computation_types.StructWithPythonType", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow_federated.python.core.api.computation_types", "line_number": 92, "usage_type": "name"}]}
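The composition rules in the `MeasuredProcess` docstring are easier to see with plain Python stand-ins. The sketch below uses ordinary functions instead of `tff.Computation`s, so it only illustrates the bookkeeping: states and measurements concatenate, while G consumes F's result:

```python
from collections import namedtuple

Output = namedtuple("Output", ["state", "result", "measurements"])

def f_next(state, x):                     # stand-in for F
    return Output(state + 1, x * 2, {"f_calls": state + 1})

def g_next(state, y):                     # stand-in for G
    return Output(state + 1, y + 10, {"g_calls": state + 1})

def composed_next(state, x):
    f_state, g_state = state
    f_out = f_next(f_state, x)
    g_out = g_next(g_state, f_out.result)            # G applied to F's result
    return Output(state=(f_out.state, g_out.state),  # <F.state, G.state>
                  result=g_out.result,
                  measurements={**f_out.measurements, **g_out.measurements})

print(composed_next((0, 0), 3))
# Output(state=(1, 1), result=16, measurements={'f_calls': 1, 'g_calls': 1})
```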
+{"seq_id": "397885104", "text": "# -*- coding: utf-8 -*-\n\nimport torch\nimport os\nimport pickle\nimport argparse\nimport numpy as np\nimport time\n\nimport pdb\n\n\nfrom torch.optim import Adam\nfrom torch.utils.data import Dataset, DataLoader\nfrom model import Word2Vec, SGNS, Spell2Vec, load_spelling, load_model, SpellHybrid2Vec\nimport linecache\n\nnp.set_printoptions(precision=4, suppress = True)\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', type=str, default='sgns', help=\"model name\")\n parser.add_argument('--data_dir', type=str, help=\"data directory path\")\n parser.add_argument('--save_dir', type=str, help=\"model directory path\")\n parser.add_argument('--eval_dir', type=str, help=\"eval directory path\")\n parser.add_argument('--embedding_size', type=int, default=200, help=\"embedding dimension\")\n parser.add_argument('--model', action='store',type=str, choices=set(['Word2Vec', 'Spell2Vec', 'SpellHybrid2Vec']), default='Word2Vec', help=\"which model to use\")\n parser.add_argument('--num_neg_samples', type=int, default=5, help=\"number of negative samples\")\n parser.add_argument('--epoch', type=int, default=10, help=\"number of epochs\")\n parser.add_argument('--batch_size', type=int, default=2000, help=\"mini-batch size\")\n parser.add_argument('--subsample_threshold', type=float, default=10e-4, help=\"subsample threshold\")\n parser.add_argument('--use_noise_weights', action='store_true', help=\"use weights for negative sampling\")\n parser.add_argument('--window', action='store', type=int, default=5, help=\"context window size\")\n parser.add_argument('--max_vocab', action='store', type=int, default=50000, help='max vocab size for word-level embeddings')\n parser.add_argument('--gpuid', type=int, default=-1, help=\"which gpu to use\")\n #Spell2Vec properties\n parser.add_argument('--char_embedding_size', type=int, default=20, help=\"size of char embeddings\")\n parser.add_argument('--char_composition', type=str, default='RNN',\n help=\"char composition function type\",\n choices=set(['RNN', 'CNN']), required=False)\n parser.add_argument('--dropout', type=float, default=0.3, help='dropout for RNN and projection layer')\n return parser.parse_args()\n\ndef my_collate(batch):\n iwords, owords = zip(* batch)\n iwords = torch.LongTensor(np.concatenate(iwords))\n owords = torch.LongTensor(np.concatenate([ow for ow in owords if ow.size > 0]))\n #target = torch.LongTensor(target)\n return [iwords, owords] #, target]\n\nclass LazyTextDataset(Dataset):\n def __init__(self, corpus_file, word2idx_file, unigram_prob, window = 5, max_vocab=1e8):\n self.corpus_file = corpus_file\n self.unk = ''\n self.bos = ''\n self.eos = ''\n self.bow = ''\n self.eow = ''\n self.pad = ''\n self.word2idx = pickle.load(open(word2idx_file, 'rb'))\n self.max_vocab = max_vocab if max_vocab < len(self.word2idx) else len(self.word2idx)\n ss_t = 0.005 * unigram_prob[3]\n print('effective subsample threshold', ss_t)\n self.ss = 1.0 - np.sqrt(ss_t/ unigram_prob)\n self.ss[[0,1,2]] = 0.0\n self.ss = np.clip(self.ss, 0, 1)\n self._total_data = 0\n self.window = window\n with open(self.corpus_file, \"r\", encoding=\"utf-8\") as f:\n self._total_data = len(f.readlines()) - 1\n self._total_data = 10\n\n\n def skipgram_instances(self, sentence):\n sentence = sentence.strip().split()\n if len(sentence) > 160:\n f = 80.0 / float(len(sentence))\n sentence= [s for s in sentence if np.random.rand() < f]\n iwords = []\n contexts = []\n s_idxs = [self.word2idx[word] \\\n if 
self.word2idx[word] < self.max_vocab else self.word2idx[self.unk] \\\n for word in sentence \\\n if (word in self.word2idx and self.ss[self.word2idx[word]] < np.random.rand())]\n if len(s_idxs) < 1:\n s_idxs= [self.word2idx[word] \\\n if self.word2idx[word] < self.max_vocab else self.word2idx[self.unk] \\\n for word in sentence \\\n if word in self.word2idx]\n #rands = np.random.rand(len(sentence))\n for i,iword in enumerate(s_idxs):\n #left = [l for l_idx,l in enumerate(s_idxs[:i],0) if self.ss[l] < rands[l_idx]][:self.window]\n left = s_idxs[max(i - self.window, 0): i]\n #right = [r for r_idx,r in enumerate(s_idxs[i+1:],i+1) if self.ss[r] < rands[r_idx]][:self.window]\n right = s_idxs[i + 1: i + 1 + self.window]\n bos_fill = [self.word2idx[self.bos]] * (self.window - len(left))\n eos_fill = [self.word2idx[self.eos]] * (self.window - len(right))\n context = bos_fill + left + right + eos_fill\n iwords.append(iword)\n contexts.append(context)\n return iwords, contexts\n\n def __getitem__(self, idx):\n line = linecache.getline(self.corpus_file, idx + 1)\n iwords, owords = self.skipgram_instances(line)\n iw, ows = np.array(list(iwords)), np.array(list(owords))\n return iw, ows\n\n def __len__(self):\n return self._total_data\n\n\ndef train(args):\n if args.gpuid > -1:\n torch.cuda.set_device(args.gpuid)\n tmp = torch.ByteTensor([0])\n torch.backends.cudnn.enabled = True\n tmp.cuda()\n print(\"using GPU\", args.gpuid)\n print('CUDNN VERSION', torch.backends.cudnn.version())\n else:\n print(\"using CPU\")\n idx2unigram_prob = pickle.load(open(os.path.join(args.data_dir, 'idx2unigram_prob.pkl'), 'rb'))\n idx, unigram_prob = zip(*sorted([(idx, p) for idx, p in idx2unigram_prob.items()]))\n unigram_prob = np.array(unigram_prob)\n if args.use_noise_weights:\n noise_unigram_prob = unigram_prob[:args.max_vocab] ** 0.75\n noise_unigram_prob = noise_unigram_prob / noise_unigram_prob.sum()\n else:\n noise_unigram_prob = None\n if args.model == 'Word2Vec':\n embedding_model = Word2Vec(word_vocab_size=args.max_vocab, embedding_size=args.embedding_size)\n elif args.model == 'Spell2Vec':\n char2idx = pickle.load(open(os.path.join(args.data_dir, 'char2idx.pkl'), 'rb'))\n wordidx2spelling, vocab_size, max_spelling_len = load_spelling(\n os.path.join(args.data_dir, 'wordidx2charidx.pkl'),\n )\n embedding_model = Spell2Vec(wordidx2spelling,\n word_vocab_size=args.max_vocab,\n noise_vocab_size=args.max_vocab, # len(noise_weights) if noise_weights is not None else 20000,\n char_vocab_size=len(char2idx),\n embedding_size=args.embedding_size,\n char_embedding_size=args.char_embedding_size,\n dropout=args.dropout,\n char_composition=args.char_composition,\n bidirectional=True)\n elif args.model == 'SpellHybrid2Vec':\n char2idx = pickle.load(open(os.path.join(args.data_dir, 'char2idx.pkl'), 'rb'))\n wordidx2spelling, vocab_size, max_spelling_len = load_spelling(\n os.path.join(args.data_dir, 'wordidx2charidx.pkl'),\n )\n embedding_model = SpellHybrid2Vec(wordidx2spelling,\n word_vocab_size=args.max_vocab,\n noise_vocab_size=args.max_vocab, # len(noise_weights) if noise_weights is not None else 20000,\n char_vocab_size=len(char2idx),\n embedding_size=args.embedding_size,\n char_embedding_size=args.char_embedding_size,\n dropout=args.dropout,\n char_composition=args.char_composition,\n bidirectional=True)\n\n else:\n raise NotImplementedError('unknown embedding model')\n dataset = LazyTextDataset(corpus_file=os.path.join(args.data_dir, 'corpus.txt'),\n word2idx_file=os.path.join(args.data_dir, 'word2idx.pkl'),\n 
unigram_prob=unigram_prob,\n window=args.window,\n max_vocab=args.max_vocab if args.model == 'Word2Vec' else 1e8)\n dataloader = DataLoader(dataset=dataset,\n batch_size=args.batch_size,\n shuffle=True,\n collate_fn=my_collate)\n total_batches = int(np.ceil(len(dataset) / args.batch_size))\n sgns = SGNS(embedding_model=embedding_model, num_neg_samples=args.num_neg_samples, weights=noise_unigram_prob)\n optim = Adam(sgns.parameters()) # , lr = 0.5)\n if args.gpuid > -1:\n sgns.init_cuda()\n\n if not os.path.isdir(args.save_dir):\n os.mkdir(args.save_dir)\n print(sgns)\n for epoch in range(1, args.epoch + 1):\n ave_time = 0.\n s = time.time()\n for batch_idx, batch in enumerate(dataloader):\n iword, owords = batch\n nwords = sgns.sample_noise(iword.size()[0])\n loss = sgns(iword, owords, nwords)\n optim.zero_grad()\n loss.backward()\n optim.step()\n if batch_idx % 10 == 0 and batch_idx > 0:\n e = time.time()\n ave_time = (e - s) / 10.\n s = time.time()\n print(\"e{:d} b{:5d}/{:5d} loss:{:7.4f} ave_time:{:7.4f}\\r\".format(epoch,\n batch_idx + 1,\n total_batches,\n loss.data[0],\n ave_time))\n path = args.save_dir + '/' + embedding_model.__class__.__name__ + '_e{:d}_loss{:.4f}'.format(epoch,\n loss.data[0])\n embedding_model.save_model(path)\n if args.eval_dir != '':\n eval_vecs = open(os.path.join(args.save_dir, 'vocab_vec.txt'), 'w', encoding='utf-8')\n eval_vocab = [ev.strip() for ev in\n open(os.path.join(args.eval_dir, 'fullVocab.txt'), 'r', encoding='utf-8').readlines()]\n word2idx = dataset.word2idx\n char2idx = pickle.load(open(os.path.join(args.data_dir, 'char2idx.pkl'), 'rb'))\n for ev in eval_vocab:\n ev_id = word2idx.get(ev, word2idx[''])\n if isinstance(embedding_model, Word2Vec):\n ev_id = ev_id if args.max_vocab > ev_id else word2idx['']\n vec = embedding_model.query(ev_id)\n else:\n ev_id = ev_id if args.max_vocab > ev_id else word2idx['']\n spelling = [char2idx['']] + [char2idx.get(i, char2idx['']) for i in ev] + [char2idx['']]\n spelling = spelling + [char2idx['']] * (max_spelling_len - len(spelling))\n vec = embedding_model.query(ev_id, spelling)\n vec = ','.join(['%4f' % i for i in vec.flatten()])\n eval_vecs.write(ev + ' ' + vec + '\\n')\n eval_vecs.close()\n\n\nif __name__ == '__main__':\n print(parse_args())\n train(parse_args())\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 11732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "numpy.set_printoptions", "line_number": 18, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 50, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": 
"attribute"}, {"api_name": "linecache.getline", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.cuda.set_device", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.ByteTensor", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.version", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "model.Word2Vec", "line_number": 131, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "model.load_spelling", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "model.Spell2Vec", "line_number": 137, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "model.load_spelling", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "model.SpellHybrid2Vec", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 172, "usage_type": "call"}, {"api_name": "model.SGNS", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, 
{"api_name": "model.Word2Vec", "line_number": 211, "usage_type": "argument"}]}
+{"seq_id": "589934505", "text": "import numpy as np\nimport scipy.misc as m\nimport keras\n\nclass DataGen(keras.utils.Sequence):\n\n def __init__(self, data_lists=None, paths=None, window=-1, batch_size=256, n_outputs=2, shuffle=True, mode='RGB', load_all=True, imsize=[-1,-1], sampling=-1.0):\n # data_lists <- dict. List of names of the images, ground truth and (optional) masks.\n # paths <- dict. Paths to images, ground truth and (optional) masks\n # window <- int. Window size\n # load_all <- bool. True if all of the data can be load into memory\n self.images_list = data_lists['images']\n self.gt_list = data_lists['gt']\n self.mask_list = data_lists.get('mask')\n self.path_X = paths[\"images\"]\n self.path_y = paths[\"gt\"]\n self.path_mask = paths.get(\"mask\")\n self.window_side = int(window/2)\n self.batch_size = batch_size\n self.n_outputs = n_outputs\n self.imsize = imsize\n self.sampling = sampling\n self.mask_id = self.get_mask_id(self.mask_list)\n self.mode = mode\n self.load_all = load_all\n if self.load_all:\n self.data = self.load_data(images_list, gt_list)\n self.on_epoch_end()\n\n def __len__(self):\n return int(np.floor(len(self.mask_id)/self.batch_size))\n\n def __getitem__(self, index):\n ind = self.mask_id[index*self.batch_size:(index+1)*self.batch_size]\n if self.load_all:\n X,y = self.get_batch(ind.astype(int))\n else:\n X,y = self.datagen(ind.astype(int))\n return X,y\n\n def on_epoch_end(self):\n p = np.random.permutation(len(self.mask_id))\n self.mask_id = self.mask_id[p]\n\n def datagen(self, indices):\n X = []\n y = []\n ind = indices[indices[:,2].argsort()] #sort indices by image index.\n previous_img = -1\n win = self.window_side\n for i,j,id_img in ind:\n if id_img != previous_img:\n previous_img = id_img\n # Special case when only reading the green channel\n if self.mode == 'G':\n img = m.imread(self.path_X+self.images_list[id_img], mode='RGB')\n img = img[:,:,1]\n else:\n img = m.imread(self.path_X+self.images_list[id_img], mode=self.mode)\n gt = m.imread(self.path_y+self.gt_list[id_img], mode='L')\n\n H, W, *channels = img.shape\n # If i and j are within the image field\n if i - win > 0 and i + win + 1 < H and j - win > 0 and j + win + 1 < W:\n X.append(img[i-win:i+win+1, j-win:j+win+1])\n y.append(gt[i,j])\n\n X = np.asarray(X)\n if len(X.shape) == 3:\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n if self.n_outputs == 2:\n y = (np.asarray(y) > 0).astype(int)\n y = keras.utils.to_categorical(y, self.n_outputs)\n y = np.reshape(y, (y.shape[0], 1, 1, y.shape[1]))\n return X, y\n\n def load_data(self, images_list, gt_list):\n imgs = []\n labels = []\n for img_name, gt_name in zip(images_list, gt_list):\n if self.mode == 'G':\n img = m.imread(self.path_X+img_name, mode='RGB')\n img = img[:,:,1]\n else:\n img = m.imread(self.path_X+img_name, mode=self.mode)\n gt = m.imread(self.path_y+gt_name, mode='L')\n imgs.append(img)\n labels.append(gt)\n return imgs, labels\n\n def get_batch(self, ind):\n X = []\n y = []\n win = self.window_side\n for i, j, id_img in ind:\n H, W, *channels = self.data[0][id_img].shape\n if i - win > 0 and i + win + 1 < H and j - win > 0 and j + win + 1 < W:\n X.append(self.data[0][id_img][i-win:i+win+1, j-win:j+win+1])\n y.append(self.data[1][id_img][i,j])\n X = np.asarray(X)\n if len(X.shape) == 3:\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n if self.n_outputs == 2:\n y = (np.asarray(y) > 0).astype(int)\n y = keras.utils.to_categorical(y, self.n_outputs)\n y = np.reshape(y, (y.shape[0], 1, 1, 
y.shape[1]))\n return X, y\n\n def get_mask_id(self, mask_list):\n # Masks can be used to define pixels to be extracted\n win = self.window_side\n mask_id = np.array([]).reshape(0,3)\n n = len(self.images_list)\n\n # If there is a mask for each image\n if mask_list:\n for i, mask in enumerate(mask_list):\n image = m.imread(self.path_mask+mask, mode='L')\n idx, idy = np.nonzero(image[win:-win, win:-win])\n maskid = np.ones(len(idx), dtype=int)*i\n mask_id = np.concatenate((mask_id, np.dstack((idx,idy,maskid))[0]))\n else:\n # If images are the same size, we can easily create mask_ids\n if self.imsize[0] > 0:\n H, W = self.imsize\n mask_id = np.array(np.meshgrid(np.arange(H), np.arange(W), np.arange(n))).T.reshape(-1,3)\n\n # Otherwise, we need to read every image and find their size\n else:\n for i, img in enumerate(self.images_list):\n image = m.imread(self.path_X+img, mode='L')\n H, W, *C = image.shape\n ids = [[h,w,i] for h in range(H) for w in range(W)]\n mask_id = np.concatenate((mask_id, np.array(ids)))\n\n if sampling > 0:\n n_ids = np.floor(len(mask_id) * self.sampling)\n mask_id = mask_id[:n_ids]\n return mask_id\n\nclass DataGenPatches(DataGen):\n\n def __init__(self, l_patch, s_patch, **kw):\n self.l_patch = l_patch\n self.s_patch = s_patch\n DataGen.__init__(self, **kw)\n\n def get_mask_id(self, mask_list):\n l_patch = self.l_patch\n s_patch = self.s_patch\n mask_id = np.array([]).reshape(0,3)\n n = len(self.images_list)\n\n if self.imsize[0] > 0:\n H, W = self.imsize\n px = np.arange(l_patch//2+1, H - l_patch//2, s_patch)\n py = np.arange(l_patch//2+1, W - l_patch//2, s_patch)\n pz = np.arange(n)\n mask_id = np.array(np.meshgrid(px, py, pz)).T.reshape(-1,3)\n else:\n for i, img in enumerate(self.images_list):\n image = m.imread(self.path_X+img, mode='L')\n H, W, *C = image.shape\n px = np.arange(l_patch//2+1, H - l_patch//2, s_patch)\n py = np.arange(l_patch//2+1, W - l_patch//2, s_patch)\n ids = np.array(np.meshgrid(px, py, i)).T.reshape(-1,3)\n mask_id = np.concatenate((mask_id, np.array(ids)))\n\n if self.sampling > 0:\n n_ids = int(len(mask_id) * self.sampling)\n mask_id = mask_id[:n_ids]\n\n return mask_id\n\n def datagen(self, indices):\n X = []\n y = []\n ind = indices[indices[:,2].argsort()] #sort indices by image index.\n hl_patch_beg = self.l_patch//2\n hl_patch_end = self.l_patch//2 + self.l_patch%2\n hs_patch_beg = self.s_patch//2\n hs_patch_end = self.s_patch//2 + self.s_patch%2\n previous_img = -1\n for i,j,id_img in ind:\n if id_img != previous_img:\n previous_img = id_img\n # Special case when only reading the green channel\n if self.mode == 'G':\n img = m.imread(self.path_X+self.images_list[id_img], mode='RGB')\n img = img[:,:,1]\n else:\n img = m.imread(self.path_X+self.images_list[id_img], mode=self.mode)\n gt = m.imread(self.path_y+self.gt_list[id_img], mode='L')\n\n H, W, *channels = img.shape\n # If i and j are within the image field\n if i - hl_patch_beg > 0 and i + hl_patch_end < H and j - hl_patch_beg > 0 and j + hl_patch_end < W:\n X.append(img[i-hl_patch_beg:i+hl_patch_end, j-hl_patch_beg:j+hl_patch_end])\n y.append(gt[i-hs_patch_beg:i+hs_patch_end, j-hs_patch_beg:j+hs_patch_end])\n\n X = np.asarray(X)\n X = X.astype('float32')/255.0\n if len(X.shape) == 3:\n X = np.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1))\n if self.n_outputs == 2:\n y = (np.asarray(y) > 0).astype(int)\n y = keras.utils.to_categorical(y, self.n_outputs)\n# y = np.reshape(y, (y.shape[0], s_patch, s_patch, y.shape[1]))\n return X, y\n\n", "sub_path": "utils/datagen.py", 
"file_name": "datagen.py", "file_ext": "py", "file_size_in_byte": 8561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "keras.utils", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 42, "usage_type": "attribute"}, {"api_name": "scipy.misc.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 56, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 59, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 60, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 82, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 82, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 85, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 118, "usage_type": "name"}, {"api_name": "numpy.nonzero", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 126, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 157, 
"usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 159, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 189, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 189, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 192, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 192, "usage_type": "name"}, {"api_name": "scipy.misc.imread", "line_number": 193, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 206, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 207, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 207, "usage_type": "attribute"}]}
+{"seq_id": "202263342", "text": "from __future__ import annotations\n\nimport requests\n\nfrom .powermeter import PowerMeasurementResult, PowerMeter\n\n\nclass ShellyPowerMeter(PowerMeter):\n def __init__(self, shelly_ip):\n self.meter_uri = \"http://{}/status/\".format(shelly_ip)\n\n def get_power(self) -> PowerMeasurementResult:\n r = requests.get(self.meter_uri, timeout=5)\n json = r.json()\n return PowerMeasurementResult(\n float(json[\"meters\"][0][\"power\"]),\n float(json[\"meters\"][0][\"timestamp\"]),\n )\n", "sub_path": "utils/measure_v2/powermeter/shelly.py", "file_name": "shelly.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "powermeter.PowerMeter", "line_number": 8, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "powermeter.PowerMeasurementResult", "line_number": 15, "usage_type": "call"}, {"api_name": "powermeter.PowerMeasurementResult", "line_number": 12, "usage_type": "name"}]}
+{"seq_id": "167763421", "text": "import asyncio\nimport websockets\n\ncmdCon = 'USB CONNECTED'\ncmdDis = 'USB DISCONNECTED'\ncmdUnplug = 'unplug'\nuriServer = \"ws://localhost:8765\"\n\nasync def main():\n async with websockets.connect(uriServer) as websocket:\n while True:\n try:\n await websocket.send(cmdCon)\n income = await websocket.recv()\n if income == cmdUnplug:\n await websocket.send(cmdDis)\n exit()\n except websockets.ConnectionClosed:\n print(f\"Connection closed\")\n break\n\nasyncio.get_event_loop().run_until_complete(main())\nasyncio.get_event_loop().run_forever()", "sub_path": "ws/utils/python/connect.py", "file_name": "connect.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "websockets.connect", "line_number": 10, "usage_type": "call"}, {"api_name": "websockets.ConnectionClosed", "line_number": 18, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 22, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "299887841", "text": "import logging\nfrom datetime import datetime\nfrom textwrap import shorten\n\nimport requests\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils import Choices\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom taggit.managers import TaggableManager\nfrom taggit.models import TaggedItemBase\nfrom wagtail.wagtailadmin.edit_handlers import (FieldPanel, InlinePanel, PageChooserPanel, MultiFieldPanel,\n StreamFieldPanel)\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore.fields import RichTextField, StreamField\nfrom wagtail.wagtailcore.models import Page, Orderable\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom core.fields import MarkdownField\nfrom core.models import MemberProfile, Platform, Event, Job\nfrom core.utils import get_canonical_image\nfrom home.forms import ContactForm\nfrom library.models import Codebase\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nWagtail Page models and related supporting Models and Settings\n\n\"\"\"\n\n\nclass UserMessage(models.Model):\n \"\"\"\n FIXME: consider removing this class, use email for messaging.\n \"\"\"\n user = models.ForeignKey(User, related_name='inbox')\n sender = models.ForeignKey(User, related_name='outbox')\n message = models.CharField(max_length=512)\n date_created = models.DateTimeField(auto_now_add=True)\n read_on = models.DateTimeField(null=True, blank=True)\n\n def is_read(self):\n return self.read_on is not None\n\n\nclass LinkFields(models.Model):\n \"\"\"\n Cribbed from github.com/wagtail/wagtaildemo\n \"\"\"\n link_external = models.URLField(\"External link\", blank=True)\n link_page = models.ForeignKey(\n Page,\n null=True,\n blank=True,\n related_name='+'\n )\n link_codebase = models.ForeignKey(\n 'library.Codebase',\n null=True,\n blank=True,\n related_name='+'\n )\n\n @property\n def link(self):\n if self.link_page:\n return self.link_page.url\n elif self.link_codebase:\n return self.link_codebase.get_absolute_url()\n else:\n return self.link_external\n\n panels = [\n FieldPanel('link_external'),\n PageChooserPanel('link_page'),\n # figure out how to manually link codebase / events / jobs into FeaturedContentItem\n # CodebaseChooserPanel('link_codebase'),\n ]\n\n class Meta:\n abstract = True\n\n\nclass CarouselItem(LinkFields):\n image = models.ForeignKey('wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n codebase_image = models.ForeignKey('library.CodebaseImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n embed_url = models.URLField(\"Embed URL\", blank=True)\n caption = models.CharField(max_length=255)\n summary = models.TextField(max_length=600, blank=True)\n title = models.CharField(max_length=255)\n panels = [\n ImageChooserPanel('image'),\n ImageChooserPanel('codebase_image'),\n FieldPanel('embed_url'),\n FieldPanel('caption'),\n FieldPanel('title'),\n MultiFieldPanel(LinkFields.panels, \"Link\"),\n 
]\n\n @property\n def featured_image(self):\n if self.image:\n return self.image\n elif self.codebase_image:\n return self.codebase_image\n return None\n\n class Meta:\n abstract = True\n\n\nclass FeaturedContentItem(Orderable, CarouselItem):\n page = ParentalKey('home.LandingPage', related_name='featured_content_queue')\n\n\nclass LandingPage(Page):\n template = 'home/index.jinja'\n FEATURED_CONTENT_COUNT = 6\n MAX_CALLOUT_ENTRIES = 3\n RECENT_FORUM_ACTIVITY_COUNT = 5\n\n mission_statement = models.CharField(max_length=512)\n community_statement = models.TextField()\n\n def get_featured_content(self):\n return self.featured_content_queue.all()[:self.FEATURED_CONTENT_COUNT]\n\n def get_recent_forum_activity(self):\n # FIXME: move to dedicated discourse module / api as we integrate more tightly with discourse\n # Discourse API endpoint documented at http://docs.discourse.org/#tag/Topics%2Fpaths%2F~1latest.json%2Fget\n if settings.DEBUG:\n random_submitters = User.objects.filter(pk__in=(3, 5, 7, 11, 13, 17))\n return [\n {\n 'title': \"Generated Forum Topic {}\".format(i),\n 'submitter': random_submitters[i],\n 'date_created': datetime.now(),\n 'url': \"https://forum.example.com/topic/{}\".format(i),\n }\n for i in range(self.RECENT_FORUM_ACTIVITY_COUNT)\n ]\n\n # FIXME: refactor and clean up logic, extract to a sensible discourse api\n r = requests.get('{0}/{1}'.format(settings.DISCOURSE_BASE_URL, 'latest.json'),\n params={'order': 'created', 'sort': 'asc'})\n posts_dict = r.json()\n topics = posts_dict['topic_list']['topics']\n recent_forum_activity = cache.get('recent_forum_activity')\n if recent_forum_activity:\n return recent_forum_activity\n # transform topics list of dictionaries into web template format with title, submitter, date_created, and url.\n\n recent_forum_activity = []\n # stuff this in the Redis Cache.\n for topic in topics[:self.RECENT_FORUM_ACTIVITY_COUNT]:\n topic_title = topic['title']\n topic_url = '{0}/t/{1}/{2}'.format(settings.DISCOURSE_BASE_URL,\n topic['slug'],\n topic['id'])\n # getting back to the original submitter will involve some trickery.\n # The Discourse embed Javascript queues up a crawler to hit the given page and parses it for content to use\n # as the initial topic text. However, this topic gets added as a specific Discourse User (`comses`,\n # see https://meta.discourse.org/t/embedding-discourse-comments-via-javascript/31963/150 for more details)\n # and so we won't always have the direct username of the submitter without looking it up by\n # 1. Discourse category_id (6 = jobs & appointments, 7 = events, 8 = codebase)\n # 2. 
Title (not guaranteed to be unique)\n\n last_poster_username = topic['last_poster_username']\n submitter = None\n submitter_url = None\n if last_poster_username == 'comses':\n category_id = topic['category_id']\n logger.debug(\"category id: %s, topic title: %s, topic: %s\", category_id, topic_title, topic)\n # special case lookup for real submitter\n # FIXME: get rid of magic constants\n target_object = None\n if category_id == 6:\n # jobs and appointments\n target_object = Job.objects.filter(title=topic_title).order_by('-date_created').first()\n elif category_id == 7:\n # events\n target_object = Event.objects.filter(title=topic_title).order_by('-date_created').first()\n elif category_id == 8:\n target_object = Codebase.objects.filter(title=topic_title).order_by('-date_created').first()\n if target_object:\n submitter = target_object.submitter\n submitter_url = submitter.member_profile.get_absolute_url()\n else:\n submitter = User.objects.get(username='AnonymousUser')\n else:\n try:\n submitter = User.objects.get(username=last_poster_username)\n except User.DoesNotExist:\n pass\n recent_forum_activity.append(\n {\n 'title': topic_title,\n 'submitter_name': submitter.username,\n 'submitter_url': submitter_url,\n # FIXME: handle created_at=None gracefully, via default date?\n 'date_created': datetime.strptime(topic.get('created_at'), \"%Y-%m-%dT%H:%M:%S.%fZ\"),\n 'url': topic_url,\n }\n )\n cache.set('recent_forum_activity', recent_forum_activity, 3600)\n return recent_forum_activity\n\n def get_latest_jobs(self):\n return Job.objects.order_by('-date_created')[:self.MAX_CALLOUT_ENTRIES]\n\n def get_upcoming_events(self):\n return Event.objects.upcoming().order_by('start_date')[:self.MAX_CALLOUT_ENTRIES]\n\n def get_context(self, request, *args, **kwargs):\n context = super(LandingPage, self).get_context(request, *args, **kwargs)\n context['featured_content'] = self.get_featured_content()\n context['recent_forum_activity'] = self.get_recent_forum_activity()\n context['latest_jobs'] = self.get_latest_jobs()\n context['upcoming_events'] = self.get_upcoming_events()\n return context\n\n content_panels = Page.content_panels + [\n FieldPanel('mission_statement', widget=forms.Textarea),\n FieldPanel('community_statement'),\n InlinePanel('featured_content_queue', label=_('Featured Content')),\n ]\n\n\nclass CategoryIndexItem(Orderable, models.Model):\n page = ParentalKey('home.CategoryIndexPage', related_name='callouts')\n image = models.ForeignKey('wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+')\n url = models.CharField(\"Relative path, absolute path, or URL\", max_length=200, blank=True)\n title = models.CharField(max_length=255)\n caption = models.CharField(max_length=600)\n\n def __str__(self):\n return \"{0} {1}\".format(self.title, self.url)\n\n\nclass SubnavigationMenu():\n pass\n\n\nclass SubNavigationLink(Orderable, models.Model):\n page = ParentalKey(Page, related_name='navigation_links')\n url = models.CharField(\"Relative path, absolute path, or full URL\", max_length=255)\n title = models.CharField(max_length=128)\n\n\nclass Breadcrumb(Orderable, models.Model):\n page = ParentalKey(Page, related_name='breadcrumbs')\n url = models.CharField(\"Relative / absolute path or full URL\", max_length=255, blank=True)\n title = models.CharField(max_length=255)\n\n def __str__(self):\n return '{0}: {1}'.format(self.title, self.url)\n\n\nclass NavigationMixin(object):\n def add_breadcrumbs(self, breadcrumb_tuples):\n 
self._add_tuples(breadcrumb_tuples, Breadcrumb)\n\n def get_breadcrumbs(self):\n return [\n {'url': item.url, 'text': item.title}\n for item in self.breadcrumbs.all()\n ]\n\n def _add_tuples(self, tuples, cls):\n related_name = cls._meta.get_field('page').related_query_name()\n related_manager = getattr(self, related_name)\n for idx, (title, url) in enumerate(tuples):\n related_manager.add(\n cls(title=title, url=url, sort_order=idx)\n )\n\n def add_navigation_links(self, navigation_tuples):\n \"\"\"\n Takes an ordered list of tuples and adds them as navigation links.\n :param navigation_tuples:\n :return:\n \"\"\"\n self._add_tuples(navigation_tuples, SubNavigationLink)\n\n def get_navigation_links(self):\n \"\"\"\n Returns a nested dict for use by the subnav Jinja2 tag.\n :return:\n \"\"\"\n return [\n {'url': nav.url, 'text': nav.title, 'active': nav.url.endswith(self.slug + '/')}\n for nav in self.navigation_links.all()\n ]\n\n\nclass CategoryIndexPage(NavigationMixin, Page):\n template = models.CharField(max_length=128, default='home/category_index.jinja')\n heading = models.CharField(max_length=128, help_text=_(\"Short name to be placed in introduction header.\"))\n summary = models.CharField(max_length=1000, help_text=_('Summary blurb for this category index page.'))\n\n def add_callout(self, image_path, title, caption, sort_order=None, user=None, url=''):\n if user is None:\n user = User.objects.get(username='alee')\n _image = get_canonical_image(title=title, path=image_path, user=user)\n self.callouts.add(\n CategoryIndexItem(\n title=title,\n sort_order=sort_order,\n caption=caption,\n image=_image,\n url=url,\n )\n )\n\n content_panels = Page.content_panels + [\n # don't expose template to web form for now, could wreak havoc\n FieldPanel('heading'),\n FieldPanel('template'),\n FieldPanel('summary', widget=forms.Textarea),\n InlinePanel('callouts', label=_('Captioned Image Callouts')),\n InlinePanel('navigation_links', label=_('Subnavigation Links')),\n ]\n\n search_fields = Page.search_fields + [\n index.SearchField('summary')\n ]\n\n\nclass StreamPage(Page, NavigationMixin):\n template = models.CharField(max_length=128, default='home/stream_page.jinja')\n date = models.DateField(\"Post date\", default=timezone.now)\n description = models.CharField(max_length=512, blank=True)\n\n body = StreamField([\n ('heading', blocks.CharBlock(classname='full title')),\n ('paragraph', blocks.RichTextBlock()),\n ('image', ImageChooserBlock()),\n ('url', blocks.URLBlock(required=False))\n ])\n\n content_panels = Page.content_panels + [\n FieldPanel('date'),\n FieldPanel('description'),\n StreamFieldPanel('body'),\n ]\n\n\nclass MarkdownPage(NavigationMixin, Page):\n template = models.CharField(max_length=128, default='home/markdown_page.jinja')\n heading = models.CharField(max_length=128, blank=True)\n date = models.DateField(\"Post date\", default=timezone.now)\n description = MarkdownField(max_length=512, blank=True)\n body = MarkdownField(blank=True)\n jumbotron = models.BooleanField(\n default=True,\n help_text=_(\"True if this page should display its title and description in a jumbotron\"))\n\n content_panels = Page.content_panels + [\n FieldPanel('heading'),\n FieldPanel('date'),\n FieldPanel('description'),\n FieldPanel('body'),\n ]\n\n search_fields = Page.search_fields + [\n index.SearchField('date'),\n index.SearchField('description'),\n index.SearchField('body')\n ]\n\n\nclass ContactPage(NavigationMixin, Page):\n template = 'home/about/contact.jinja'\n description = 
models.CharField(max_length=512, blank=True)\n\n    def serve(self, request):\n        if request.method == 'POST':\n            form = ContactForm(request=request, data=request.POST)\n            if form.is_valid():\n                form.save()\n                return redirect('home:contact-sent')\n        else:\n            form = ContactForm(request)\n\n        return render(request, self.template, {\n            'page': self,\n            'form': form,\n        })\n\n    content_panels = Page.content_panels + [\n        FieldPanel('description')\n    ]\n\n\nclass PlatformSnippetPlacement(Orderable, models.Model):\n    page = ParentalKey('home.PlatformIndexPage', related_name='platform_placements')\n    platform = models.ForeignKey(Platform, related_name='+')\n\n    class Meta:\n        verbose_name = 'platform placement'\n        verbose_name_plural = 'platform placements'\n\n    panels = [\n        SnippetChooserPanel('platform'),\n    ]\n\n    def __str__(self):\n        return \"Snippet placement for {0}\".format(self.platform.name)\n\n\nclass PlatformIndexPage(NavigationMixin, Page):\n    template = 'home/resources/platforms.jinja'\n    description = models.TextField(blank=True)\n\n    content_panels = Page.content_panels + [\n        FieldPanel('description'),\n        InlinePanel('platform_placements', label='Platforms'),\n    ]\n\n    def get_platforms(self):\n        # highlight featured platforms? allow the community to rank them.\n        return self.platform_placements.all()\n\n    def get_context(self, request):\n        context = super().get_context(request)\n        # FIXME: add pagination\n        context['platforms'] = self.get_platforms()\n        return context\n\n\nclass JournalTag(TaggedItemBase):\n    content_object = ParentalKey('home.Journal', related_name='tagged_journals')\n\n\n@register_snippet\nclass Journal(index.Indexed, ClusterableModel):\n    name = models.CharField(max_length=255)\n    url = models.URLField()\n    issn = models.CharField(max_length=16, blank=True, help_text=_(\"Linking ISSN-L for this Journal\"))\n    description = models.CharField(max_length=1000)\n    tags = TaggableManager(through=JournalTag, blank=True)\n\n    panels = [\n        FieldPanel('name'),\n        FieldPanel('url'),\n        FieldPanel('issn'),\n        FieldPanel('description', widget=forms.Textarea),\n        FieldPanel('tags'),\n    ]\n\n    search_fields = [\n        index.SearchField('name'),\n        index.SearchField('description'),\n        index.SearchField('issn'),\n        index.RelatedFields('tags', [\n            index.SearchField('name'),\n        ]),\n    ]\n\n\nclass JournalSnippetPlacement(Orderable, models.Model):\n    page = ParentalKey('home.JournalIndexPage', related_name='journal_placements')\n    journal = models.ForeignKey(Journal, related_name='+')\n\n    class Meta:\n        verbose_name = 'journal placement'\n        verbose_name_plural = 'journal placements'\n\n\nclass JournalIndexPage(NavigationMixin, Page):\n    template = 'home/resources/journals.jinja'\n    description = models.TextField(blank=True)\n\n    content_panels = Page.content_panels + [\n        FieldPanel('description'),\n        InlinePanel('journal_placements', label='Journals'),\n    ]\n\n\n@register_snippet\nclass FaqEntry(index.Indexed, models.Model):\n    FAQ_CATEGORIES = Choices(\n        ('abm', _('Agent-based Modeling Questions')),\n        ('general', _('General CoMSES Net Questions')),\n        ('model-library', _('Computational Model Library Questions')),\n    )\n    category = models.CharField(max_length=32, choices=FAQ_CATEGORIES, default=FAQ_CATEGORIES.general)\n    question = models.CharField(max_length=128, help_text=_(\"Short question\"))\n    answer = models.TextField(help_text=_(\"Markdown formatted answer\"))\n    date_created = models.DateTimeField(auto_now_add=True)\n    last_modified = models.DateTimeField(auto_now=True)\n    submitter = models.ForeignKey(User, blank=True, null=True)\n\n    def 
__str__(self):\n return \"[{0}] {1} {2}\".format(self.category, self.question, shorten(self.answer, 140))\n\n\nclass FaqEntryPlacement(Orderable, models.Model):\n page = ParentalKey('home.FaqPage', related_name='faq_entry_placements')\n faq_entry = models.ForeignKey(FaqEntry, related_name='+')\n\n class Meta:\n verbose_name = 'faq placement'\n\n\nclass FaqPage(Page, NavigationMixin):\n template = 'home/about/faq.jinja'\n description = models.CharField(max_length=1000)\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n # FIXME: add pagination\n context['faq_entries'] = FaqEntry.objects.all()\n context['faq_categories'] = FaqEntry.FAQ_CATEGORIES\n return context\n\n content_panels = Page.content_panels + [\n FieldPanel('description'),\n InlinePanel('faq_entry_placements', label='FAQ Entries')\n ]\n\n search_fields = Page.search_fields + [\n index.RelatedFields('faq_entry_placements', [\n index.SearchField('faq_entry')\n ])\n ]\n\n\nclass PeopleEntryPlacement(Orderable, models.Model):\n CATEGORIES = Choices(\n (1, 'directorate', _('Directorate')),\n (2, 'board', _('Executive Board')),\n (3, 'digest', _('CoMSES Digest Editors')),\n (4, 'staff', _('Staff')),\n (5, 'alumni', _('Executive Board Alumni')),\n )\n page = ParentalKey('home.PeoplePage', related_name='people_entry_placements')\n member_profile = models.ForeignKey('core.MemberProfile', related_name='+')\n category = models.PositiveIntegerField(choices=CATEGORIES, default=CATEGORIES.board)\n\n def __str__(self):\n return \"{0}: {1} {2}\".format(self.sort_order, self.member_profile, self.category)\n\n class Meta:\n verbose_name = 'people entry placement'\n\n\nclass PeoplePage(Page, NavigationMixin):\n template = 'home/about/people.jinja'\n heading = models.CharField(max_length=64)\n description = models.CharField(max_length=1000, blank=True)\n\n def add_users(self, category, usernames, offset):\n for idx, username in enumerate(usernames):\n # manually iterate and get MemberProfile to enforce original ordering\n profile = MemberProfile.objects.get(user__username=username)\n self.people_entry_placements.add(\n PeopleEntryPlacement(sort_order=offset + idx,\n member_profile=profile,\n category=category)\n )\n\n def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n context['people_categories'] = PeopleEntryPlacement.CATEGORIES\n return context\n\n content_panels = Page.content_panels + [\n FieldPanel('heading'),\n FieldPanel('description'),\n InlinePanel('people_entry_placements', label='People Entries')\n ]\n\n\nclass NewsIndexPage(Page):\n def get_context(self, request):\n context = super(NewsIndexPage, self).get_context(request)\n context['news_entries'] = NewsPage.objects.child_of(self).live()\n return context\n\n\nclass NewsPage(Page):\n body = RichTextField()\n date = models.DateField(\"Post date\")\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('body'),\n index.FilterField('date')\n ]\n\n # Editor panels configuration\n content_panels = Page.content_panels + [\n FieldPanel('date'),\n FieldPanel('body', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n ]\n\n promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n ]\n\n # Parent page / subpage type rules\n parent_page_types 
= ['home.NewsIndexPage']\n subpage_types = []\n\n\nclass NewsPageRelatedLink(Orderable):\n page = ParentalKey(NewsPage, related_name='related_links')\n name = models.CharField(max_length=255)\n url = models.URLField()\n\n panels = [\n FieldPanel('name'),\n FieldPanel('url'),\n ]\n", "sub_path": "django/home/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 23503, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 49, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 63, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 64, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 86, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.PageChooserPanel", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 102, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 102, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 105, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.CharField", 
"line_number": 108, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 110, "usage_type": "name"}, {"api_name": "wagtail.wagtailimages.edit_handlers.ImageChooserPanel", "line_number": 112, "usage_type": "call"}, {"api_name": "wagtail.wagtailimages.edit_handlers.ImageChooserPanel", "line_number": 113, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 114, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 115, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 116, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.MultiFieldPanel", "line_number": 117, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 132, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 133, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 142, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 143, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 151, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 151, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 152, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 152, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 157, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 157, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 164, "usage_type": "call"}, {"api_name": "django.conf.settings.DISCOURSE_BASE_URL", "line_number": 164, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 164, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 168, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 168, "usage_type": "name"}, {"api_name": "django.conf.settings.DISCOURSE_BASE_URL", "line_number": 177, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 177, "usage_type": "name"}, {"api_name": "core.models.Job.objects.filter", "line_number": 199, "usage_type": "call"}, {"api_name": "core.models.Job.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "core.models.Job", "line_number": 199, "usage_type": "name"}, {"api_name": "core.models.Event.objects.filter", "line_number": 202, "usage_type": "call"}, {"api_name": "core.models.Event.objects", "line_number": 202, "usage_type": "attribute"}, {"api_name": "core.models.Event", "line_number": 202, "usage_type": "name"}, {"api_name": "library.models.Codebase.objects.filter", "line_number": 204, "usage_type": "call"}, 
{"api_name": "library.models.Codebase.objects", "line_number": 204, "usage_type": "attribute"}, {"api_name": "library.models.Codebase", "line_number": 204, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 209, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 209, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 209, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 212, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 212, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 212, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 213, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 213, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 221, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 225, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 225, "usage_type": "name"}, {"api_name": "core.models.Job.objects.order_by", "line_number": 229, "usage_type": "call"}, {"api_name": "core.models.Job.objects", "line_number": 229, "usage_type": "attribute"}, {"api_name": "core.models.Job", "line_number": 229, "usage_type": "name"}, {"api_name": "core.models.Event.objects.upcoming", "line_number": 232, "usage_type": "call"}, {"api_name": "core.models.Event.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "core.models.Event", "line_number": 232, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 242, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 242, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 243, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 243, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 243, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 244, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 245, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 245, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 249, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 249, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 249, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 250, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 251, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 251, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 254, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 254, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 256, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 256, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 257, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 257, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 258, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 258, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 268, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 268, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 268, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 269, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 269, "usage_type": "argument"}, {"api_name": "django.db.models.CharField", "line_number": 270, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 270, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 271, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 271, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 274, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 274, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 274, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 275, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 275, "usage_type": "argument"}, {"api_name": "django.db.models.CharField", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 276, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 277, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 320, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 321, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 321, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 322, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 322, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 322, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 323, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 323, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 327, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 327, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 327, "usage_type": "name"}, {"api_name": "core.utils.get_canonical_image", "line_number": 328, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 339, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 339, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 341, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 342, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 343, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 343, "usage_type": "attribute"}, {"api_name": 
"django.forms", "line_number": 343, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 344, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 344, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 345, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 345, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 348, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 348, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 349, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 349, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 353, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 354, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 354, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 355, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 355, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 355, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 355, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 356, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 356, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.fields.StreamField", "line_number": 358, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks.CharBlock", "line_number": 359, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks", "line_number": 359, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.blocks.RichTextBlock", "line_number": 360, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks", "line_number": 360, "usage_type": "name"}, {"api_name": "wagtail.wagtailimages.blocks.ImageChooserBlock", "line_number": 361, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks.URLBlock", "line_number": 362, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.blocks", "line_number": 362, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 365, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 365, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 366, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 367, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.StreamFieldPanel", "line_number": 368, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 372, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 373, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 373, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 374, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 374, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 375, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 375, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 375, "usage_type": "attribute"}, 
{"api_name": "django.utils.timezone", "line_number": 375, "usage_type": "name"}, {"api_name": "core.fields.MarkdownField", "line_number": 376, "usage_type": "call"}, {"api_name": "core.fields.MarkdownField", "line_number": 377, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 378, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 378, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 380, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 382, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 382, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 383, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 384, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 385, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 386, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 389, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 389, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 390, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 390, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 391, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 391, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 392, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 392, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 396, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 398, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 398, "usage_type": "name"}, {"api_name": "home.forms.ContactForm", "line_number": 402, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 405, "usage_type": "call"}, {"api_name": "home.forms.ContactForm", "line_number": 407, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 409, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 414, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 414, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 415, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 419, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 419, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 419, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 420, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 421, "usage_type": "call"}, {"api_name": "core.models.Platform", "line_number": 421, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 421, "usage_type": "name"}, {"api_name": "wagtail.wagtailsnippets.edit_handlers.SnippetChooserPanel", "line_number": 428, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", 
"line_number": 435, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 437, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 437, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 439, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 439, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 440, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 441, "usage_type": "call"}, {"api_name": "taggit.models.TaggedItemBase", "line_number": 455, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 456, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index.Indexed", "line_number": 460, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 460, "usage_type": "name"}, {"api_name": "modelcluster.models.ClusterableModel", "line_number": 460, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 461, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 461, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 462, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 462, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 463, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 464, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 464, "usage_type": "name"}, {"api_name": "taggit.managers.TaggableManager", "line_number": 465, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 468, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 469, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 470, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 471, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 471, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 471, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 472, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 476, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 476, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 477, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 477, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 478, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 478, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.RelatedFields", "line_number": 479, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 479, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 480, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 480, "usage_type": "name"}, {"api_name": 
"wagtail.wagtailsnippets.models.register_snippet", "line_number": 459, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 485, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 485, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 485, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 486, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 487, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 487, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 494, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 496, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 496, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 498, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 498, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 499, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 500, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index.Indexed", "line_number": 505, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 505, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 505, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 505, "usage_type": "name"}, {"api_name": "model_utils.Choices", "line_number": 506, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 507, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 508, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 509, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 511, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 511, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 512, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 512, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 512, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 513, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 513, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 513, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 514, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 514, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 515, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 515, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 516, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 516, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 516, "usage_type": "name"}, {"api_name": "textwrap.shorten", "line_number": 519, "usage_type": "call"}, {"api_name": "wagtail.wagtailsnippets.models.register_snippet", "line_number": 504, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 522, "usage_type": "name"}, 
{"api_name": "django.db.models.Model", "line_number": 522, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 522, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 523, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 524, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 524, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 530, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 532, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 532, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 541, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 541, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 542, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 543, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 546, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 546, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.RelatedFields", "line_number": 547, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 547, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 548, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 548, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 553, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 553, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 553, "usage_type": "name"}, {"api_name": "model_utils.Choices", "line_number": 554, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 555, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 556, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 557, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 558, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 559, "usage_type": "call"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 561, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 562, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 562, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 563, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 563, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 572, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 574, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 574, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 575, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 575, "usage_type": "name"}, {"api_name": "core.models.MemberProfile.objects.get", "line_number": 580, "usage_type": "call"}, {"api_name": "core.models.MemberProfile.objects", "line_number": 580, "usage_type": "attribute"}, {"api_name": 
"core.models.MemberProfile", "line_number": 580, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 592, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 592, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 593, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 594, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 595, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 599, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 606, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.fields.RichTextField", "line_number": 607, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 608, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 608, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 609, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 609, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 613, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 613, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.search_fields", "line_number": 617, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 617, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.SearchField", "line_number": 618, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 618, "usage_type": "name"}, {"api_name": "wagtail.wagtailsearch.index.FilterField", "line_number": 619, "usage_type": "call"}, {"api_name": "wagtail.wagtailsearch.index", "line_number": 619, "usage_type": "name"}, {"api_name": "wagtail.wagtailcore.models.Page.content_panels", "line_number": 623, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 623, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 624, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 625, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.InlinePanel", "line_number": 626, "usage_type": "call"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.MultiFieldPanel", "line_number": 630, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Page.promote_panels", "line_number": 630, "usage_type": "attribute"}, {"api_name": "wagtail.wagtailcore.models.Page", "line_number": 630, "usage_type": "name"}, {"api_name": "wagtail.wagtailimages.edit_handlers.ImageChooserPanel", "line_number": 631, "usage_type": "call"}, {"api_name": "wagtail.wagtailcore.models.Orderable", "line_number": 639, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 640, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 641, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 641, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 642, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 642, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 645, "usage_type": "call"}, {"api_name": 
"wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 646, "usage_type": "call"}]}
+{"seq_id": "560631039", "text": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nr\"\"\"\nBasic training script for PyTorch\n\"\"\"\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom openpose.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom openpose.config import cfg\nfrom openpose.data import make_data_loader\nfrom openpose.solver import make_lr_scheduler\nfrom openpose.solver import make_optimizer\nfrom openpose.engine.inference import inference\nfrom openpose.engine.trainer import do_train_test\nfrom openpose.model.detector.densepose_model import DensePoseModel\nfrom openpose.utils.checkpoint import DetectronCheckpointer\nfrom openpose.utils.collect_env import collect_env_info\nfrom openpose.utils.comm import synchronize, get_rank\nfrom openpose.utils.imports import import_file\nfrom openpose.utils.logger import setup_logger\nfrom openpose.utils.miscellaneous import mkdir\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as plt_patches\nimport numpy as np\nfig,ax = plt.subplots(1)\nfigsub,axsub = plt.subplots(1)\n\ndef load_checkpoint(cfg, local_rank, distributed):\n model = DensePoseModel(cfg)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_lr_scheduler(cfg, optimizer)\n\n if distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[local_rank], output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n )\n\n arguments = {}\n arguments[\"iteration\"] = 0\n\n output_dir = cfg.OUTPUT_DIR\n\n save_to_disk = get_rank() == 0\n checkpointer = DetectronCheckpointer(\n cfg, model, optimizer, scheduler, output_dir, save_to_disk\n )\n extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n arguments.update(extra_checkpoint_data)\n data_loader = make_data_loader(\n cfg,\n is_train=False,\n is_distributed=distributed,\n start_iter=arguments[\"iteration\"],\n )\n return model, data_loader\n\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Training\")\n parser.add_argument(\n \"--config-file\",\n default=\"../../config/densepose_r50_fpn_e2e.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--skip-test\",\n dest=\"skip_test\",\n help=\"Do not test the final model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = num_gpus > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n mkdir(output_dir)\n\n logger = setup_logger(\"openpose\", output_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n logger.info(\"Loaded configuration file 
{}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n model,data_loader = load_checkpoint(cfg, args.local_rank, args.distributed)\n model.eval()\n for iteration, (images,_, _) in enumerate(data_loader[0]):\n images = images.to(cfg.MODEL.DEVICE)\n ax.clear()\n axsub.clear()\n pred = model(images)[0]\n img0 = images.tensors[0]\n img0 = img0.detach().cpu().numpy().transpose(1,2,0)\n #img0[...] += [102.9801,115.9465,122.7117]\n img0[:,:,0] += 102.9801\n img0[:,:,1] += 115.9465\n img0[:,:,2] += 122.7117\n img0 = img0.astype(np.uint8)\n bbox = pred.bbox\n uvs = pred.get_field('uv_output')\n ax.imshow(img0[:,:,::-1])\n\n for box,uv in zip(bbox[:2],uvs[:2]):\n rect = plt_patches.Rectangle((box[0],box[1]),box[2]-box[0],box[3]-box[1],linewidth=1,edgecolor='r',facecolor='none') \n ax.add_patch(rect)\n res = uv[0]\n for ch in range(1,15):\n res = np.hstack((res,uv[ch]))\n axsub.imshow(res)\n plt.draw()\n plt.pause(0.01)\n input()\n\n # if not args.skip_test:\n # run_test(cfg, model, args.distributed)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "openpose/tests/test_resnet50_test.py", "file_name": "test_resnet50_test.py", "file_ext": "py", "file_size_in_byte": 5052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "openpose.model.detector.densepose_model.DensePoseModel", "line_number": 34, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 34, "usage_type": "argument"}, {"api_name": "torch.device", "line_number": 35, "usage_type": "call"}, {"api_name": "openpose.config.cfg.MODEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 35, "usage_type": "name"}, {"api_name": "openpose.solver.make_optimizer", "line_number": 38, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 38, "usage_type": "argument"}, {"api_name": "openpose.solver.make_lr_scheduler", "line_number": 39, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 39, "usage_type": "argument"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg.OUTPUT_DIR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 51, "usage_type": "name"}, {"api_name": "openpose.utils.comm.get_rank", "line_number": 53, "usage_type": "call"}, {"api_name": "openpose.utils.checkpoint.DetectronCheckpointer", "line_number": 54, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 55, "usage_type": "argument"}, {"api_name": "openpose.config.cfg.MODEL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 57, "usage_type": "name"}, {"api_name": "openpose.data.make_data_loader", "line_number": 59, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 60, "usage_type": "argument"}, {"api_name": "argparse.ArgumentParser", "line_number": 72, 
"usage_type": "call"}, {"api_name": "argparse.REMAINDER", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.distributed.init_process_group", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 101, "usage_type": "attribute"}, {"api_name": "openpose.utils.comm.synchronize", "line_number": 104, "usage_type": "call"}, {"api_name": "openpose.config.cfg.merge_from_file", "line_number": 106, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 106, "usage_type": "name"}, {"api_name": "openpose.config.cfg.merge_from_list", "line_number": 107, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 107, "usage_type": "name"}, {"api_name": "openpose.config.cfg.freeze", "line_number": 108, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 108, "usage_type": "name"}, {"api_name": "openpose.config.cfg.OUTPUT_DIR", "line_number": 110, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 110, "usage_type": "name"}, {"api_name": "openpose.utils.miscellaneous.mkdir", "line_number": 112, "usage_type": "call"}, {"api_name": "openpose.utils.logger.setup_logger", "line_number": 114, "usage_type": "call"}, {"api_name": "openpose.utils.comm.get_rank", "line_number": 114, "usage_type": "call"}, {"api_name": "openpose.utils.collect_env.collect_env_info", "line_number": 119, "usage_type": "call"}, {"api_name": "openpose.config.cfg", "line_number": 125, "usage_type": "argument"}, {"api_name": "openpose.config.cfg", "line_number": 127, "usage_type": "argument"}, {"api_name": "openpose.config.cfg.MODEL", "line_number": 130, "usage_type": "attribute"}, {"api_name": "openpose.config.cfg", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 140, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 146, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}]}
+{"seq_id": "425976576", "text": "from sklearn.preprocessing import LabelBinarizer\n#from sklearn.metrics import classification_report\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nimport tensorflow as tf\nif tf.test.gpu_device_name():\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\nelse:\n print(\"Please install GPU version of TF\")\n\n#import sys\n#sys.path.append(\"/home/hrushikesh/dl4cv/callbacks\")\n\nfrom minivggnet import MiniVGGNet\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.optimizers import SGD\nimport argparse\n\nap=argparse.ArgumentParser()\nap.add_argument(\"-w\",\"--weights\",required=True,\n help=\"path to the best weights file\")\n#output directory to store figure and serialized JSON training history\nargs=vars(ap.parse_args())\n\n# load the training and testing data, then scale it into the\n# range [0, 1]\nprint(\"[INFO] loading CIFAR-10 data...\")\n((trainX, trainY), (testX, testY)) = cifar10.load_data()\ntrainX = trainX.astype(\"float\") / 255.0\ntestX = testX.astype(\"float\") / 255.0\n# convert the labels from integers to vectors\n\nlb = LabelBinarizer()\ntrainY = lb.fit_transform(trainY)\ntestY = lb.transform(testY)\n# initialize the optimizer and model\nprint(\"[INFO] compiling model...\")\nopt = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)\nmodel = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt,\n metrics=[\"accuracy\"])\n\ncheckpoint= ModelCheckpoint(args[\"weights\"], monitor=\"val_loss\", mode=\"min\",\n save_best_only=True, verbose=1)\ncallbacks=[checkpoint]\n\n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY),\nbatch_size=64, epochs=40, callbacks=callbacks, verbose=2)\n\n", "sub_path": "conv/cifar10_checkpoint_best.py", "file_name": "cifar10_checkpoint_best.py", "file_ext": "py", "file_size_in_byte": 1861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tensorflow.test.gpu_device_name", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.test", "line_number": 10, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.cifar10.load_data", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.datasets.cifar10", "line_number": 32, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 42, "usage_type": "call"}, {"api_name": "minivggnet.MiniVGGNet.build", "line_number": 43, "usage_type": "call"}, {"api_name": "minivggnet.MiniVGGNet", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "526887720", "text": "import os\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Flatten, Dense\nfrom keras.models import load_model\nimport numpy as np\n\n\ntrain_count = 0\n\ntrain_count += len(os.listdir('trainset/'))\n\n\nimg_width, img_height = 100, 100\ntrain_data_dir = \"trainset/\"\nbatch_size = 32\nepochs = 5\nnb_classes = 10\ninput_shape = (img_width, img_height, 1)\n\nnb_train_samples = train_count\n\ndef nnmodel():\n\n model = Sequential()\n model.add(Conv2D(128, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(1000))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes, activation=\"softmax\"))\n\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n return model\n\n\ndef train():\n\n model = nnmodel()\n\n print (model.summary())\n\n # this is the augmentation configuration we will use for training\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n color_mode='grayscale',\n batch_size=batch_size,\n class_mode='categorical',\n shuffle=True)\n\n\n from PIL import ImageFile\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n\n model.fit_generator(\n train_generator,\n #steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs)\n #callbacks=[monitor,checkpointer])\n\n #model.load_weights('best_weights.hdf5') # load weights from best model\n model.save('model.h5')\n\n\n\ndef test():\n\n model = load_model('model.h5')\n # dimensions of our images.\n img_width, img_height = 100, 100\n\n test_data_dir = 'testset'\n\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n    test_generator = test_datagen.flow_from_directory(\n        test_data_dir,\n        target_size=(img_width, img_height),\n        color_mode='grayscale',\n        batch_size=batch_size,\n        class_mode = 'categorical',\n        shuffle=False)\n\n    filenames = test_generator.filenames\n\n    print (filenames)\n    predictions = model.predict_generator(test_generator, steps=int(np.ceil(len(filenames) / batch_size)))  # steps counts batches, not samples\n    classes = []\n    print (predictions)\n    for prediction in predictions :\n        #print sum(prediction)\n        classes.append((list(prediction).index(max(prediction))))\n    print (classes)\n\n\n#train()\n\ntest()\n\n\n\n\n\n\n", "sub_path": "signLanguagePredict.py", "file_name": "signLanguagePredict.py", "file_ext": "py", "file_size_in_byte": 2840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 73, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 94, "usage_type": "call"}]}
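The per-row `list(prediction).index(max(prediction))` in the record above is an argmax in disguise, and the resulting indices map back to folder names through the generator's `class_indices`. A small helper showing both steps; `gen` stands in for a `flow_from_directory` iterator like `test_generator`:

```python
# Decode softmax rows into folder names (gen is a hypothetical DirectoryIterator).
import numpy as np

def decode_predictions(predictions, gen):
    # argmax over each row, equivalent to list(p).index(max(p)) above
    class_ids = np.argmax(predictions, axis=1)
    # invert {'cat_name': index, ...} so folder names can be looked up by index
    id_to_name = {v: k for k, v in gen.class_indices.items()}
    return [id_to_name[i] for i in class_ids]
```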
+{"seq_id": "301025180", "text": "#!/usr/bin/python3\n\"\"\"View fo User objects\"\"\"\n\nfrom models import storage\nfrom models.city import City\nfrom models.review import Review\nfrom models.place import Place\nfrom models.user import User\nfrom flask import Flask, jsonify, request, abort\nfrom api.v1.views import app_views\n\n\n@app_views.route('/places//reviews', methods=['GET'],\n strict_slashes=False)\ndef all_reviews(place_id):\n \"\"\"Retrieve all reviews based on place_id\"\"\"\n obj_places = storage.get(Place, place_id)\n list_reviews = []\n if obj_places:\n for review in obj_places.reviews:\n list_reviews.append(review.to_dict())\n return jsonify(list_reviews)\n else:\n abort(404)\n\n\n@app_views.route('/reviews/', methods=['GET'],\n strict_slashes=False)\ndef one_review(review_id):\n \"\"\"Retrieve a review based on review_id\"\"\"\n obj_review = storage.get(Review, review_id)\n if obj_review:\n return jsonify(obj_review.to_dict())\n abort(404)\n\n\n@app_views.route('/reviews/', methods=['DELETE'],\n strict_slashes=False)\ndef del_review(review_id):\n \"\"\"Delete a review based on review_id\"\"\"\n obj_review = storage.get(Review, review_id)\n if obj_review:\n obj_review.delete()\n storage.save()\n return({})\n abort(404)\n\n\n@app_views.route('/places//reviews', methods=['POST'],\n strict_slashes=False)\ndef create_review(place_id):\n \"\"\"Post review based on json\"\"\"\n obj_place = storage.get(Place, place_id)\n if obj_place is None:\n abort(404)\n obj_dict = request.get_json()\n if obj_dict is None:\n abort(400, 'Not a JSON')\n # transform the HTTP body request to a dictionary\n if 'user_id' not in obj_dict:\n abort(400, 'Missing user_id')\n user_id = obj_dict.get('user_id', None)\n obj_user = storage.get(User, user_id)\n if not obj_user:\n abort(404)\n if 'text' not in obj_dict:\n abort(400, 'Missing text')\n obj_review = Review(place_id=place_id, **obj_dict)\n obj_review.save()\n return jsonify(obj_review.to_dict()), 201\n\n\n@app_views.route('reviews/', methods=['PUT'],\n strict_slashes=False)\ndef update_review(review_id):\n \"\"\"Updates review based on user_id\"\"\"\n obj_review = storage.get(Review, review_id)\n if obj_review is None:\n abort(404)\n # transform the HTTP body request to a dictionary\n to_update = request.get_json()\n if to_update is None:\n abort(400, 'Not a JSON')\n\n # These keys cannot be update\n ignore_keys = ['id', 'user_id', 'place_id',\n 'created_at', 'updated_at']\n\n # check if key in dictionary is not allowed to be updated\n for key_ignore in ignore_keys:\n if key_ignore in to_update.keys():\n del to_update[key_ignore]\n if obj_review:\n for key, value in to_update.items():\n setattr(obj_review, key, value)\n obj_review.save()\n return jsonify(obj_review.to_dict()), 200\n else:\n abort(404)\n", "sub_path": "api/v1/views/places_reviews.py", "file_name": "places_reviews.py", "file_ext": "py", "file_size_in_byte": 3060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "models.storage.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.place.Place", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 24, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 13, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 13, 
"usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 31, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 31, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 34, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 27, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 27, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 41, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 41, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 41, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 44, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 46, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 37, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 37, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.place.Place", "line_number": 53, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 61, "usage_type": "call"}, {"api_name": "models.storage.get", "line_number": 63, "usage_type": "call"}, {"api_name": "models.user.User", "line_number": 63, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 67, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 49, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 49, "usage_type": "name"}, {"api_name": "models.storage.get", "line_number": 77, "usage_type": "call"}, {"api_name": "models.review.Review", "line_number": 77, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 99, "usage_type": "call"}, {"api_name": "api.v1.views.app_views.route", "line_number": 73, "usage_type": "call"}, {"api_name": "api.v1.views.app_views", "line_number": 73, "usage_type": "name"}]}
+{"seq_id": "595554525", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright 2018 NAVER Corp.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport nsml\nfrom nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML\nfrom dataset import KinQueryDataset, preprocess\n\n\n# DONOTCHANGE: They are reserved for nsml\n# This is for nsml leaderboard\ndef bind_model(sess, config):\n # 학습한 모델을 저장하는 함수입니다.\n def save(dir_name, *args):\n # directory\n os.makedirs(dir_name, exist_ok=True)\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(dir_name, 'model'))\n\n # 저장한 모델을 불러올 수 있는 함수입니다.\n def load(dir_name, *args):\n saver = tf.train.Saver()\n # find checkpoint\n ckpt = tf.train.get_checkpoint_state(dir_name)\n if ckpt and ckpt.model_checkpoint_path:\n checkpoint = os.path.basename(ckpt.model_checkpoint_path)\n saver.restore(sess, os.path.join(dir_name, checkpoint))\n else:\n raise NotImplemented('No checkpoint!')\n print('Model loaded')\n\n def infer(raw_data, **kwargs):\n \"\"\"\n :param raw_data: raw input (여기서는 문자열)을 입력받습니다\n :param kwargs:\n :return:\n \"\"\"\n # dataset.py에서 작성한 preprocess 함수를 호출하여, 문자열을 벡터로 변환합니다\n preprocessed_data = preprocess(raw_data, config.strmaxlen)\n # 저장한 모델에 입력값을 넣고 prediction 결과를 리턴받습니다\n pred = sess.run(prob, feed_dict={x: preprocessed_data})\n clipped = np.array(pred > config.threshold, dtype=np.int)\n # DONOTCHANGE: They are reserved for nsml\n # 리턴 결과는 [(확률, 0 or 1)] 의 형태로 보내야만 리더보드에 올릴 수 있습니다. 
리더보드 결과에 확률의 값은 영향을 미치지 않습니다\n return list(zip(pred.flatten(), clipped.flatten()))\n\n # DONOTCHANGE: They are reserved for nsml\n # nsml에서 지정한 함수에 접근할 수 있도록 하는 함수입니다.\n nsml.bind(save=save, load=load, infer=infer)\n\n\ndef _batch_loader(iterable, n=1):\n length = len(iterable)\n for n_idx in range(0, length, n):\n yield iterable[n_idx:min(n_idx + n, length)]\n\n\ndef conv2d(x, f=64, k=3, s=1, pad='SAME', name=\"conv2d\"):\n return tf.layers.conv2d(x,\n filters=f, kernel_size=k, strides=s,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),\n bias_initializer=tf.zeros_initializer(),\n padding=pad,\n name=name)\n\n\ndef dense(x, units, name='fc'):\n return tf.layers.dense(x, units,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),\n bias_initializer=tf.zeros_initializer(),\n name=name)\n\n\ndef batch_norm(x, momentum=0.9, eps=1e-5, is_train=True, name=\"bn\"):\n return tf.layers.batch_normalization(inputs=x,\n momentum=momentum,\n epsilon=eps,\n scale=True,\n trainable=is_train,\n name=name)\n\n\ndef instance_norm(x, name=\"ibn\"):\n epsilon = 1e-9\n\n mean, var = tf.nn.moments(x, [1, 2], keepdims=True, name=name)\n\n return tf.div(tf.subtract(x, mean), tf.sqrt(tf.add(var, epsilon)))\n\n\ndef BiRNN(x, dropout=.5, embed_size=32, seq_length=200, h_units=512):\n n_hidden = h_units\n n_layers = 3\n # not yet implemented...\n return None\n\n\ndef contrastive_loss(y, d, batch_size):\n tmp = y * tf.square(d)\n tmp2 = (1. - y) * tf.square(tf.maximum((1 - d), 0))\n return tf.reduce_sum(tmp + tmp2) / batch_size / 2\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser()\n\n # DONOTCHANGE: They are reserved for nsml\n args.add_argument('--mode', type=str, default='train')\n args.add_argument('--pause', type=int, default=0)\n args.add_argument('--iteration', type=str, default='0')\n\n # User options\n args.add_argument('--output', type=int, default=1)\n args.add_argument('--epochs', type=int, default=151)\n args.add_argument('--batch', type=int, default=256)\n args.add_argument('--strmaxlen', type=int, default=400)\n args.add_argument('--embedding', type=int, default=32)\n args.add_argument('--threshold', type=float, default=0.5)\n args.add_argument('--bn', type=bool, default=False)\n args.add_argument('--lr', type=float, default=1e-4)\n config = args.parse_args()\n\n if not HAS_DATASET and not IS_ON_NSML: # It is not running on nsml\n DATASET_PATH = '../sample_data/kin/'\n\n # model's specification (hyper-parameters)\n input_size = config.embedding * config.strmaxlen\n output_size = 1\n fc_unit = 1024\n conv_filters = 64\n learning_rate = config.lr\n character_size = 251\n\n x = tf.placeholder(tf.int32, [None, config.strmaxlen])\n y_ = tf.placeholder(tf.float32, [None, output_size])\n\n # embeddings\n char_embedding = tf.get_variable('char_embedding', [character_size, config.embedding])\n embedded = tf.nn.embedding_lookup(char_embedding, x)\n embedded = tf.reshape(embedded, (-1, 40, 40, 8)) # to 4-D\n\n print(\"[+] embedded size : \", embedded.get_shape().as_list()) # (batch_size, 40, 40, 8)\n\n logits = dense(embedded, output_size, name='fc-2')\n prob = tf.nn.sigmoid(logits)\n\n # logistic loss\n bce_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y_))\n\n # Adam Optimizer\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(bce_loss)\n # RMSProp Optimizer\n # train_step = 
tf.train.RMSPropOptimizer(learning_rate, momentum=0.9).minimize(bce_loss)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n # not yet implemented...\n\n # DONOTCHANGE: Reserved for nsml\n bind_model(sess=sess, config=config)\n\n # DONOTCHANGE: Reserved for nsml\n if config.pause:\n nsml.paused(scope=locals())\n\n if config.mode == 'train':\n dataset = KinQueryDataset(DATASET_PATH, config.strmaxlen)\n\n dataset_len = len(dataset)\n one_batch_size = dataset_len // config.batch\n if dataset_len % config.batch != 0:\n one_batch_size += 1\n\n for epoch in range(config.epochs):\n avg_loss = 0.\n for i, (data, labels) in enumerate(_batch_loader(dataset, config.batch)):\n _, loss = sess.run([train_step, bce_loss],\n feed_dict={\n x: data,\n y_: labels\n })\n\n print('Batch : ', i + 1, '/', one_batch_size, ', BCE in this minibatch: ', float(loss))\n avg_loss += float(loss)\n\n print('epoch:', epoch, ' train_loss:', float(avg_loss / one_batch_size))\n\n min_loss = avg_loss\n\n nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,\n train__loss=float(avg_loss / one_batch_size), step=epoch)\n\n # DONOTCHANGE (You can decide how often you want to save the model)\n nsml.save(epoch)\n\n # [(0.3, 0), (0.7, 1), ... ]\n elif config.mode == 'test_local':\n with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:\n queries = f.readlines()\n res = []\n for batch in _batch_loader(queries, config.batch):\n temp_res = nsml.infer(batch)\n res += temp_res\n\n print(res)\n", "sub_path": "kin/_siamese_not_yet/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.makedirs", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dataset.preprocess", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 67, "usage_type": "attribute"}, {"api_name": "nsml.bind", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.variance_scaling_initializer", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 87, "usage_type": "call"}, {"api_name": 
"tensorflow.contrib", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.variance_scaling_initializer", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.l2_regularizer", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros_initializer", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.moments", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tensorflow.div", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.maximum", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 128, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 132, "usage_type": "call"}, {"api_name": "nsml.HAS_DATASET", "line_number": 150, "usage_type": "name"}, {"api_name": "nsml.IS_ON_NSML", "line_number": 150, "usage_type": "name"}, {"api_name": "nsml.DATASET_PATH", "line_number": 151, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 161, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid_cross_entropy_with_logits", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 178, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 182, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 183, "usage_type": "call"}, {"api_name": "nsml.paused", "line_number": 191, "usage_type": "call"}, 
{"api_name": "dataset.KinQueryDataset", "line_number": 194, "usage_type": "call"}, {"api_name": "nsml.DATASET_PATH", "line_number": 194, "usage_type": "argument"}, {"api_name": "nsml.report", "line_number": 217, "usage_type": "call"}, {"api_name": "nsml.save", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "nsml.DATASET_PATH", "line_number": 225, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "nsml.infer", "line_number": 229, "usage_type": "call"}]}
+{"seq_id": "170856185", "text": "import sys\nimport os\nimport json\nfrom PyQt5.QtWidgets import (QMainWindow, QPushButton, QApplication, QWidget, QGridLayout, QAction, QVBoxLayout,\n qApp, QFileDialog, QHBoxLayout, QLabel)\nfrom table import Table\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.mappingtables = ''\n # Окно\n self.setGeometry(500, 300, 500, 100)\n self.setWindowTitle(\"JournalChanger\")\n self.mainwidget = QWidget(self)\n self.setCentralWidget(self.mainwidget)\n self.layout_grid = QGridLayout()\n self.mainwidget.setLayout(self.layout_grid)\n # Exit\n exitAction = QAction('&Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n exitAction.triggered.connect(qApp.quit)\n\n # Mapping Settings Actions\n self.newmappingAction = QAction('New mapping')\n self.newmappingAction.setShortcut('Ctrl+N')\n self.newmappingAction.triggered.connect(self.showmapping)\n\n self.openmappingAction = QAction('Open mapping')\n self.openmappingAction.setShortcut('Ctrl+O')\n self.openmappingAction.triggered.connect(self.showmapping)\n\n self.statusBar()\n\n self.menubar = self.menuBar()\n self.fileMenu = self.menubar.addMenu('&File')\n self.settings = self.fileMenu.addMenu('&Mappings')\n self.settings.addAction(self.newmappingAction)\n self.settings.addAction(self.openmappingAction)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(exitAction)\n\n # buttons\n self.but1 = QPushButton(\"Show Mappings\")\n self.layout_grid.addWidget(self.but1)\n self.but1.clicked.connect(self.showmapping)\n\n def showmapping(self):\n if self.sender().text() == \"New mapping\":\n filename = 'New'\n elif self.sender().text() == \"Open mapping\" or self.sender().text() == \"Show Mappings\":\n filename = QFileDialog.getOpenFileName(self, 'Open Mapping File', os.getcwd())\n else:\n print(\"test Button activated\")\n self.mappingtables = MappingWindow(filename)\n self.mappingtables.show()\n\n\nclass MappingWindow(QMainWindow):\n def __init__(self, filename):\n super().__init__()\n self.filename = filename\n # Window\n self.setGeometry(500, 300, 500, 100)\n self.setWindowTitle(\"Mappings\")\n self.mappingwidget = QWidget(self)\n self.setCentralWidget(self.mappingwidget)\n self.vbox_grid = QVBoxLayout()\n self.hbox_table_grid = QHBoxLayout()\n self.hbox_label_grid = QHBoxLayout()\n self.mappingwidget.setLayout(self.vbox_grid)\n\n # Toolbar actions\n # Exit\n exitAction = QAction('&Close', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip(\"Close mappings\")\n # Add row\n addrows = QAction('Add Row', self)\n addrows.triggered.connect(self.addrow)\n addrows.setStatusTip(\"Add row to tables\")\n # Delete row\n delrow = QAction('Del Row', self)\n delrow.triggered.connect(self.delrow)\n delrow.setStatusTip(\"Delete selected row\")\n\n self.statusBar()\n\n self.menu = self.menuBar()\n fileMenu = self.menu.addMenu('&File')\n fileMenu.addAction(addrows)\n fileMenu.addAction(exitAction)\n\n self.toolbar = self.addToolBar(\"&Add\")\n self.toolbar.addAction(addrows)\n self.toolbar.addAction(delrow)\n\n # Table for mapping\n self.sourcetable = Table(self.filename, 'Source')\n self.hbox_table_grid.addWidget(self.sourcetable)\n self.hbox_table_grid.addSpacing(40)\n self.targettable = Table(self.filename, 'Target')\n self.hbox_table_grid.addWidget(self.targettable)\n\n self.label1 = QLabel(\"Source\")\n self.label2 = QLabel('Target')\n self.hbox_label_grid.addWidget(self.label1)\n self.hbox_label_grid.addSpacing(40)\n 
self.hbox_label_grid.addWidget(self.label2)\n\n self.vbox_grid.addLayout(self.hbox_label_grid)\n self.vbox_grid.addLayout(self.hbox_table_grid)\n\n self.butsave = QPushButton('Save mapping')\n self.butsave.clicked.connect(self.writemapping)\n\n self.vbox_grid.addWidget(self.butsave)\n\n def addrow(self):\n row = self.sourcetable.rowCount() + 1\n self.sourcetable.setRowCount(row)\n self.targettable.setRowCount(row)\n\n def delrow(self):\n # delete selected row from source and target tables\n rowtodel = self.sourcetable.currentRow()\n self.sourcetable.removeRow(rowtodel)\n self.targettable.removeRow(rowtodel)\n\n def writemapping(self):\n filesave = QFileDialog.getSaveFileName(self, \"Save mapping\", os.getcwd(), \"JSON files(*.json)\")\n print(filesave)\n mappings = {}\n for i in range(0, self.sourcetable.rowCount()):\n mapp = (dict\n (\n Source=dict(\n account=self.sourcetable.item(i, 0).text(),\n ICP=self.sourcetable.item(i, 1).text(),\n MovProd=self.sourcetable.item(i, 2).text(),\n VarLob=self.sourcetable.item(i, 3).text(),\n MktOvr=self.sourcetable.item(i, 4).text(),\n AuditDim=self.sourcetable.item(i, 5).text(),\n RelPartDisc=self.sourcetable.item(i, 6).text(),\n CostCenterDisc=self.sourcetable.item(i, 7).text(),\n CustomType=self.sourcetable.item(i, 8).text()\n ),\n Target=dict(\n account=self.targettable.item(i, 0).text(),\n ICP=self.targettable.item(i, 1).text(),\n MovProd=self.targettable.item(i, 2).text(),\n VarLob=self.targettable.item(i, 3).text(),\n MktOvr=self.targettable.item(i, 4).text(),\n AuditDim=self.targettable.item(i, 5).text(),\n RelPartDisc=self.targettable.item(i, 6).text(),\n CostCenterDisc=self.targettable.item(i, 7).text(),\n CustomType=self.targettable.item(i, 8).text()\n )\n )\n )\n mapp2 = dict.fromkeys([self.sourcetable.item(i, 0).text()], mapp)\n mappings.update(mapp2)\n mappingtowrite = dict.fromkeys(['Mappings'], mappings)\n print(mappingtowrite)\n with open(filesave[0], 'w', encoding='utf-8') as writefile:\n x = json.dumps(mappingtowrite, sort_keys=True, indent=4, ensure_ascii=False)\n writefile.write(x)\n writefile.close()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())\n", "sub_path": "GUI.py", "file_name": "GUI.py", "file_ext": "py", "file_size_in_byte": 6815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.qApp.quit", "line_number": 24, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.qApp", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 54, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 61, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QWidget", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 71, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 85, "usage_type": "call"}, {"api_name": "table.Table", "line_number": 101, "usage_type": "call"}, {"api_name": "table.Table", "line_number": 104, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 108, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName", "line_number": 133, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 133, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 133, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 168, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 173, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 176, "usage_type": "call"}]}
+{"seq_id": "435584659", "text": "import tkinter as tk\nimport serial\nimport simplejson\nimport time\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom drawnow import *\nimport matplotlib.pyplot as plt\n\n\ndatos = []\narduinoData = serial.Serial('COM3',9600,timeout=5)\nplt.ion()\ncnt = 0\n\ndef suma():\n suma = int(entrada1.get())\n arduinoData.write(suma)\n time.sleep(1)\n print(suma)\n return var.set(suma)\n\ndef makeFig():\n plt.ylim(0,50)\n plt.title('Medicion de la Temperatura')\n plt.grid(True)\n plt.ylabel('Temperatura')\n plt.plot(datos,'ro-',label='Temperatura')\n plt.legend(loc='upper left')\n fig.canvas.draw()\n\nventana = tk.Tk()\nventana.wm_title(\"Lectura de Temperaturas\")\nvar = tk.StringVar()\n\nfig = plt.figure()\ncanvas = FigureCanvasTkAgg(fig,master=ventana)\ncanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\nel = tk.Label(ventana,text=\"Numero1: \",bg=\"pink\",fg=\"white\")\nel.pack(padx=5,pady=4,ipadx=5,ipady=5,fill=tk.X)\n\nentrada1 = tk.Entry(ventana)\nentrada1.pack(fill=tk.X,padx=5,pady=5,ipadx=5,ipady=5)\n\nbotonSuma = tk.Button(ventana,text=\"Suma\",fg=\"blue\",command=suma)\nbotonSuma.pack(side=tk.TOP)\n\nwhile True:\n while(arduinoData.inWaiting()==0):\n pass\n arduinoString = arduinoData.readline()\n jsonObject = simplejson.loads(arduinoString)\n temp = float(jsonObject[\"t\"])\n y = float(jsonObject[\"y\"])\n time.sleep(0.01)\n print(temp,\",\",y)\n datos.append(temp)\n drawnow(makeFig)\n plt.pause(.0001)\nventana.mainloop()", "sub_path": "python-arduino/Tkinter5.py", "file_name": "Tkinter5.py", "file_ext": "py", "file_size_in_byte": 1480, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "serial.Serial", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 31, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 36, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tkinter.BOTH", "line_number": 37, "usage_type": "attribute"}, 
{"api_name": "tkinter.Label", "line_number": 39, "usage_type": "call"}, {"api_name": "tkinter.X", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 42, "usage_type": "call"}, {"api_name": "tkinter.X", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 46, "usage_type": "attribute"}, {"api_name": "simplejson.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]}
+{"seq_id": "551360437", "text": "import os.path as osp\nimport argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.datasets import Planetoid\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import GCNConv, ChebConv # noqa\nfrom torch_geometric.utils import from_scipy_sparse_matrix\n\nimport pickle\nimport numpy as np\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--use_gdc', action='store_true',\n help='Use GDC preprocessing.')\nargs = parser.parse_args()\n\ndataset = 'ota'\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)\nprint(path)\n# dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures())\n# data = dataset[0]\n\nwith open(osp.join(path,'processed_data.p'), 'rb') as fp:\n all_inputs = pickle.load(fp)\n\nif 'circuit_graph' in locals():\n del circuit_graph\n\nfor circuit_name, circuit_data in all_inputs.items():\n df = circuit_data[\"data_matrix\"]\n print(circuit_name)\n node_features = df.values\n node_features = np.delete(node_features, 0, 1)\n node_features = np.array(node_features, dtype=np.float32)\n node_features = node_features[:, 0:16]\n x = torch.Tensor(node_features)\n y = torch.Tensor(circuit_data[\"target\"])\n adj = circuit_data[\"adjacency_matrix\"]\n # print(adj.todense())\n edge_index, edge_weight = from_scipy_sparse_matrix(adj)\n print(edge_index,edge_weight)\n exit()\n if 'circuit_graph' in locals() and 'X' in locals():\n X = np.concatenate((X, node_features), axis=0)\n label = circuit_data[\"target\"].reshape((-1, 1))\n y = np.concatenate((y, label), axis=0)\n igraph = circuit_data[\"adjacency_matrix\"]\n circuit_graph = block_diag((circuit_graph, igraph)).tocsr()\n else:\n X = node_features\n y = circuit_data[\"target\"].reshape((-1, 1))\n circuit_graph = circuit_data[\"adjacency_matrix\"]\n\n\nif args.use_gdc:\n gdc = T.GDC(self_loop_weight=1, normalization_in='sym',\n normalization_out='col',\n diffusion_kwargs=dict(method='ppr', alpha=0.05),\n sparsification_kwargs=dict(method='topk', k=128,\n dim=0), exact=True)\n data = gdc(data)\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(dataset.num_features, 16, cached=True,\n normalize=not args.use_gdc)\n self.conv2 = GCNConv(16, dataset.num_classes, cached=True,\n normalize=not args.use_gdc)\n # self.conv1 = ChebConv(data.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, data.num_features, K=2)\n\n def forward(self):\n x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr\n x = F.relu(self.conv1(x, edge_index, edge_weight))\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index, edge_weight)\n return F.log_softmax(x, dim=1)\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel, data = Net().to(device), data.to(device)\noptimizer = torch.optim.Adam([\n dict(params=model.conv1.parameters(), weight_decay=5e-4),\n dict(params=model.conv2.parameters(), weight_decay=0)\n], lr=0.01) # Only perform weight-decay on first convolution.\n\n\ndef train():\n model.train()\n optimizer.zero_grad()\n F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()\n optimizer.step()\n\n\n@torch.no_grad()\ndef test():\n model.eval()\n logits, accs = model(), []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n pred = logits[mask].max(1)[1]\n acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()\n accs.append(acc)\n return accs\n\n\nbest_val_acc = test_acc = 0\nfor epoch in range(1, 201):\n 
train()\n train_acc, val_acc, tmp_test_acc = test()\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n test_acc = tmp_test_acc\n log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n print(log.format(epoch, train_acc, best_val_acc, test_acc))\n", "sub_path": "examples/gcn_gana.py", "file_name": "gcn_gana.py", "file_ext": "py", "file_size_in_byte": 4082, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "torch_geometric.utils.from_scipy_sparse_matrix", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "torch_geometric.transforms.GDC", "line_number": 59, "usage_type": "call"}, {"api_name": "torch_geometric.transforms", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 70, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 100, "usage_type": "call"}]}
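The record above converts each circuit's scipy adjacency matrix with `from_scipy_sparse_matrix`, which returns the COO endpoints as a `[2, num_edges]` index tensor plus a parallel weight tensor. A tiny demonstration on a 3-node graph:

```python
# What from_scipy_sparse_matrix returns for a small adjacency matrix.
import scipy.sparse as sp
from torch_geometric.utils import from_scipy_sparse_matrix

# Two directed edges, 0 -> 1 and 1 -> 2, each with unit weight.
adj = sp.coo_matrix(([1.0, 1.0], ([0, 1], [1, 2])), shape=(3, 3))
edge_index, edge_weight = from_scipy_sparse_matrix(adj)
print(edge_index)   # shape [2, 2]: row 0 holds sources, row 1 holds targets
print(edge_weight)  # the matching nonzero entries, here [1.0, 1.0]
```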
+{"seq_id": "224924702", "text": "\"\"\" Launcher functionality for the Google Compute Engine (GCE)\n\"\"\"\nimport json\nimport logging\nimport os\n\nfrom dcos_launch import onprem, util\nfrom dcos_launch.platforms import gcp\nfrom dcos_test_utils.helpers import Host\nfrom googleapiclient.errors import HttpError\n\nlog = logging.getLogger(__name__)\n\n\ndef get_credentials(env=None) -> tuple:\n path = None\n if env is None:\n env = os.environ.copy()\n if 'GCE_CREDENTIALS' in env:\n json_credentials = env['GCE_CREDENTIALS']\n elif 'GOOGLE_APPLICATION_CREDENTIALS' in env:\n path = env['GOOGLE_APPLICATION_CREDENTIALS']\n json_credentials = util.read_file(path)\n else:\n raise util.LauncherError(\n 'MissingParameter', 'Either GCE_CREDENTIALS or GOOGLE_APPLICATION_CREDENTIALS must be set in env')\n\n return json_credentials, path\n\n\nclass OnPremLauncher(onprem.AbstractOnpremLauncher):\n # Launches a homogeneous cluster of plain GMIs intended for onprem DC/OS\n def __init__(self, config: dict, env=None):\n creds_string, _ = get_credentials(env)\n self.gcp_wrapper = gcp.GcpWrapper(json.loads(creds_string))\n self.config = config\n\n @property\n def deployment(self):\n \"\"\" Builds a BareClusterDeployment instance with self.config, but only returns it successfully if the\n corresponding real deployment (active machines) exists and doesn't contain any errors.\n \"\"\"\n try:\n deployment = gcp.BareClusterDeployment(self.gcp_wrapper, self.config['deployment_name'],\n self.config['gce_zone'])\n info = deployment.get_info()\n errors = info['operation'].get('error')\n if errors:\n raise util.LauncherError('DeploymentContainsErrors', str(errors))\n return deployment\n except HttpError as e:\n if e.resp.status == 404:\n raise util.LauncherError('DeploymentNotFound',\n \"The deployment you are trying to access doesn't exist\") from e\n raise e\n\n def create(self) -> dict:\n self.key_helper()\n node_count = 1 + (self.config['num_masters'] + self.config['num_public_agents']\n + self.config['num_private_agents'])\n gcp.BareClusterDeployment.create(\n self.gcp_wrapper,\n self.config['deployment_name'],\n self.config['gce_zone'],\n node_count,\n self.config['disk_size'],\n self.config['disk_type'],\n self.config['source_image'],\n self.config['machine_type'],\n self.config['image_project'],\n self.config['ssh_user'],\n self.config['ssh_public_key'],\n self.config['disable_updates'],\n self.config['use_preemptible_vms'],\n tags=self.config.get('tags'))\n return self.config\n\n def key_helper(self):\n \"\"\" Generates a public key and a private key and stores them in the config. The public key will be applied to\n all the instances in the deployment later on when wait() is called.\n \"\"\"\n if self.config['key_helper']:\n private_key, public_key = util.generate_rsa_keypair()\n self.config['ssh_private_key'] = private_key.decode()\n self.config['ssh_public_key'] = public_key.decode()\n\n def get_cluster_hosts(self) -> [Host]:\n return list(self.deployment.hosts)[1:]\n\n def get_bootstrap_host(self) -> Host:\n return list(self.deployment.hosts)[0]\n\n def wait(self):\n \"\"\" Waits for the deployment to complete: first, the network that will contain the cluster is deployed. Once\n the network is deployed, a firewall for the network and an instance template are deployed. 
Finally,\n once the instance template is deployed, an instance group manager and all its instances are deployed.\n \"\"\"\n self.deployment.wait_for_completion()\n\n def delete(self):\n \"\"\" Deletes all the resources associated with the deployment (instance template, network, firewall, instance\n group manager and all its instances.\n \"\"\"\n self.deployment.delete()\n", "sub_path": "dcos_launch/gcp.py", "file_name": "gcp.py", "file_ext": "py", "file_size_in_byte": 4208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "dcos_launch.util.read_file", "line_number": 23, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 23, "usage_type": "name"}, {"api_name": "dcos_launch.util.LauncherError", "line_number": 25, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 25, "usage_type": "name"}, {"api_name": "dcos_launch.onprem.AbstractOnpremLauncher", "line_number": 31, "usage_type": "attribute"}, {"api_name": "dcos_launch.onprem", "line_number": 31, "usage_type": "name"}, {"api_name": "dcos_launch.platforms.gcp.GcpWrapper", "line_number": 35, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp.BareClusterDeployment", "line_number": 44, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp", "line_number": 44, "usage_type": "name"}, {"api_name": "dcos_launch.util.LauncherError", "line_number": 49, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 49, "usage_type": "name"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 51, "usage_type": "name"}, {"api_name": "dcos_launch.util.LauncherError", "line_number": 53, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 53, "usage_type": "name"}, {"api_name": "dcos_launch.platforms.gcp.BareClusterDeployment.create", "line_number": 61, "usage_type": "call"}, {"api_name": "dcos_launch.platforms.gcp.BareClusterDeployment", "line_number": 61, "usage_type": "attribute"}, {"api_name": "dcos_launch.platforms.gcp", "line_number": 61, "usage_type": "name"}, {"api_name": "dcos_launch.util.generate_rsa_keypair", "line_number": 83, "usage_type": "call"}, {"api_name": "dcos_launch.util", "line_number": 83, "usage_type": "name"}, {"api_name": "dcos_test_utils.helpers.Host", "line_number": 87, "usage_type": "name"}, {"api_name": "dcos_test_utils.helpers.Host", "line_number": 90, "usage_type": "name"}]}
+{"seq_id": "414657027", "text": "import os.path as osp\nimport logging\nimport time\nimport argparse\nimport csv\nfrom collections import OrderedDict\n\nimport options.options as option\nimport utils.util as util\nfrom data.util import bgr2ycbcr\nfrom data import create_dataset, create_dataloader\nfrom models import create_model\n\n\ndef cal_pnsr_ssim(sr_img, gt_img, lr_img, lrgt_img):\n # save images\n suffix = opt['suffix']\n if suffix:\n save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '.png')\n else:\n save_img_path = osp.join(dataset_dir, folder, img_name + '.png')\n util.save_img(sr_img, save_img_path)\n #\n # if suffix:\n # save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '_GT.png')\n # else:\n # save_img_path = osp.join(dataset_dir, folder, img_name + '_GT.png')\n # util.save_img(gt_img, save_img_path)\n #\n if suffix:\n save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '_LR.png')\n else:\n save_img_path = osp.join(dataset_dir, folder, img_name + '_LR.png')\n util.save_img(lr_img, save_img_path)\n #\n # if suffix:\n # save_img_path = osp.join(dataset_dir, folder, img_name + suffix + '_LR_ref.png')\n # else:\n # save_img_path = osp.join(dataset_dir, folder, img_name + '_LR_ref.png')\n # util.save_img(lrgt_img, save_img_path)\n\n # calculate PSNR and SSIM\n gt_img = gt_img / 255.\n sr_img = sr_img / 255.\n\n lr_img = lr_img / 255.\n lrgt_img = lrgt_img / 255.\n\n crop_border = opt['crop_border'] if opt['crop_border'] else opt['scale']\n if crop_border == 0:\n cropped_sr_img = sr_img\n cropped_gt_img = gt_img\n else:\n cropped_sr_img = sr_img[crop_border:-crop_border, crop_border:-crop_border, :]\n cropped_gt_img = gt_img[crop_border:-crop_border, crop_border:-crop_border, :]\n\n psnr = util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)\n ssim = util.calculate_ssim(cropped_sr_img * 255, cropped_gt_img * 255)\n test_results['psnr'].append(psnr)\n test_results['ssim'].append(ssim)\n\n # PSNR and SSIM for LR\n psnr_lr = util.calculate_psnr(lr_img * 255, lrgt_img * 255)\n ssim_lr = util.calculate_ssim(lr_img * 255, lrgt_img * 255)\n test_results['psnr_lr'].append(psnr_lr)\n test_results['ssim_lr'].append(ssim_lr)\n\n if gt_img.shape[2] == 3: # RGB image\n sr_img_y = bgr2ycbcr(sr_img, only_y=True)\n gt_img_y = bgr2ycbcr(gt_img, only_y=True)\n if crop_border == 0:\n cropped_sr_img_y = sr_img_y\n cropped_gt_img_y = gt_img_y\n else:\n cropped_sr_img_y = sr_img_y[crop_border:-crop_border, crop_border:-crop_border]\n cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]\n psnr_y = util.calculate_psnr(cropped_sr_img_y * 255, cropped_gt_img_y * 255)\n ssim_y = util.calculate_ssim(cropped_sr_img_y * 255, cropped_gt_img_y * 255)\n test_results['psnr_y'].append(psnr_y)\n test_results['ssim_y'].append(ssim_y)\n\n lr_img_y = bgr2ycbcr(lr_img, only_y=True)\n lrgt_img_y = bgr2ycbcr(lrgt_img, only_y=True)\n psnr_y_lr = util.calculate_psnr(lr_img_y * 255, lrgt_img_y * 255)\n ssim_y_lr = util.calculate_ssim(lr_img_y * 255, lrgt_img_y * 255)\n test_results['psnr_y_lr'].append(psnr_y_lr)\n test_results['ssim_y_lr'].append(ssim_y_lr)\n\n writer.writerow([osp.join(folder, img_name), psnr_y, psnr_y_lr, ssim_y, ssim_y_lr])\n logger.info(\n '{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}. 
LR PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'.\n format(osp.join(folder, img_name), psnr, ssim, psnr_y, ssim_y, psnr_lr, ssim_lr, psnr_y_lr, ssim_y_lr))\n else:\n writer.writerow([osp.join(folder, img_name), psnr, psnr_lr])\n logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}. LR PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(\n osp.join(folder, img_name), psnr, ssim, psnr_lr, ssim_lr))\n\n return test_results\n\n\n# options\nparser = argparse.ArgumentParser()\nparser.add_argument('-opt', type=str, required=True, help='Path to options YMAL file.')\nopt = option.parse(parser.parse_args().opt, is_train=False)\nopt = option.dict_to_nonedict(opt)\n\nutil.mkdirs(\n (path for key, path in opt['path'].items()\n if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))\nutil.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,\n screen=True, tofile=True)\nlogger = logging.getLogger('base')\nlogger.info(option.dict2str(opt))\n\n# Create test dataset and dataloader\ntest_loaders = []\nfor phase, dataset_opt in sorted(opt['datasets'].items()):\n test_set = create_dataset(dataset_opt)\n test_loader = create_dataloader(test_set, dataset_opt)\n logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))\n test_loaders.append(test_loader)\n\nmodel = create_model(opt)\nfor test_loader in test_loaders:\n test_set_name = test_loader.dataset.opt['name']\n logger.info('\\nTesting [{:s}]...'.format(test_set_name))\n test_start_time = time.time()\n dataset_dir = osp.join(opt['path']['results_root'], test_set_name)\n # util.mkdir(dataset_dir)\n\n test_results = OrderedDict()\n test_results['psnr'] = []\n test_results['ssim'] = []\n test_results['psnr_y'] = []\n test_results['ssim_y'] = []\n\n test_results['psnr_lr'] = []\n test_results['ssim_lr'] = []\n test_results['psnr_y_lr'] = []\n test_results['ssim_y_lr'] = []\n\n with open(osp.join(opt['path']['log'], 'test_' + opt['name'] + '_test.csv'), 'w') as f:\n writer = csv.writer(f)\n for data in test_loader:\n model.feed_data(data)\n if test_set_name == 'Vid4':\n folder = osp.split(osp.dirname(data['GT_path'][0][0]))[1]\n else:\n folder = ''\n util.mkdir(osp.join(dataset_dir, folder))\n\n model.test()\n visuals = model.get_current_visuals()\n\n if test_set_name == 'Vimeo90K':\n center = visuals['SR'].shape[0] // 2\n img_path = data['GT_path'][0]\n img_name = osp.splitext(osp.basename(img_path))[0]\n\n sr_img = util.tensor2img(visuals['SR']) # uint8\n gt_img = util.tensor2img(visuals['GT'][center]) # uint8\n lr_img = util.tensor2img(visuals['LR']) # uint8\n lrgt_img = util.tensor2img(visuals['LR_ref'][center]) # uint8\n\n test_results = cal_pnsr_ssim(sr_img, gt_img, lr_img, lrgt_img)\n\n else:\n t_step = visuals['SR'].shape[0]\n for i in range(t_step):\n img_path = data['GT_path'][i][0]\n img_name = osp.splitext(osp.basename(img_path))[0]\n\n sr_img = util.tensor2img(visuals['SR'][i]) # uint8\n gt_img = util.tensor2img(visuals['GT'][i]) # uint8\n lr_img = util.tensor2img(visuals['LR'][i]) # uint8\n lrgt_img = util.tensor2img(visuals['LR_ref'][i]) # uint8\n\n test_results = cal_pnsr_ssim(sr_img, gt_img, lr_img, lrgt_img)\n\n # Average PSNR/SSIM results\n ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])\n ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])\n\n ave_psnr_lr = sum(test_results['psnr_lr']) / len(test_results['psnr_lr'])\n ave_ssim_lr = sum(test_results['ssim_lr']) / len(test_results['ssim_lr'])\n\n 
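    # log the averaged results; the Y-channel block below only runs for RGB images\n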
    logger.info(\n        '----Average PSNR/SSIM results for {}----\\n\\tPSNR: {:.6f} dB; SSIM: {:.6f}. LR PSNR: {:.6f} dB; SSIM: {:.6f}.\\n'.format(\n            test_set_name, ave_psnr, ave_ssim, ave_psnr_lr, ave_ssim_lr))\n    if test_results['psnr_y'] and test_results['ssim_y']:\n        ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])\n        ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])\n\n        ave_psnr_y_lr = sum(test_results['psnr_y_lr']) / len(test_results['psnr_y_lr'])\n        ave_ssim_y_lr = sum(test_results['ssim_y_lr']) / len(test_results['ssim_y_lr'])\n        logger.info(\n            '----Y channel, average PSNR/SSIM----\\n\\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}. LR PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.\\n'.\n            format(ave_psnr_y, ave_ssim_y, ave_psnr_y_lr, ave_ssim_y_lr))\n", "sub_path": "codes/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 8426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.util.save_img", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.util.save_img", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.util.calculate_psnr", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 58, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 58, "usage_type": "name"}, {"api_name": "utils.util.calculate_psnr", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 63, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 64, "usage_type": "name"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 69, "usage_type": "call"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.util.calculate_psnr", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 78, "usage_type": "name"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 82, "usage_type": "call"}, {"api_name": "data.util.bgr2ycbcr", "line_number": 83, "usage_type": "call"}, {"api_name": "utils.util.calculate_psnr", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 84, "usage_type": "name"}, {"api_name": "utils.util.calculate_ssim", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 92, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 102, "usage_type": "call"}, {"api_name": "options.options.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "options.options", "line_number": 104, "usage_type": "name"}, {"api_name": "options.options.dict_to_nonedict", "line_number": 105, "usage_type": "call"}, {"api_name": "options.options", "line_number": 105, "usage_type": "name"}, {"api_name": "utils.util.mkdirs", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 107, "usage_type": "name"}, {"api_name": "utils.util.setup_logger", "line_number": 110, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 110, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 110, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 112, "usage_type": "call"}, {"api_name": "options.options.dict2str", "line_number": 113, "usage_type": "call"}, {"api_name": "options.options", "line_number": 113, "usage_type": "name"}, {"api_name": "data.create_dataset", "line_number": 118, "usage_type": "call"}, {"api_name": "data.create_dataloader", "line_number": 119, "usage_type": "call"}, {"api_name": "models.create_model", "line_number": 123, "usage_type": "call"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 143, "usage_type": "call"}, {"api_name": "data.util", "line_number": 144, "usage_type": "name"}, {"api_name": "data.util", "line_number": 145, "usage_type": "argument"}, {"api_name": "os.path.split", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 147, "usage_type": "call"}, {"api_name": "data.util", "line_number": 147, "usage_type": "name"}, {"api_name": "utils.util.mkdir", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "name"}, {"api_name": "data.util", "line_number": 157, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 158, "usage_type": "call"}, {"api_name": "utils.util.tensor2img", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 160, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 161, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 162, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 162, 
"usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 163, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 163, "usage_type": "name"}, {"api_name": "data.util", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.util.tensor2img", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 173, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 174, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 175, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.util.tensor2img", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.util", "line_number": 176, "usage_type": "name"}]}
+{"seq_id": "228732805", "text": "#######################################################\n#################### IMPORT LIBRARY ####################\n########################################################\nimport bs4\nimport lxml\nimport numpy\nfrom pandas import *\nimport stop_words\nimport re\nimport time\nimport requests\nimport datetime\nimport os\nfrom selenium import webdriver\n\nbrowser = webdriver.Chrome(\"/Users/chou/Google Drive/websites/github/web_scraping/chromedriver\")\n\n\n###################################################\n#################### ARGUMENTS ####################\n###################################################\ninput_job = \"data scientist\"\ninput_quote = False\ninput_city = \"\"\ninput_state = \"NC\"\nsign_1 = \"-\"\nsign_2 = \"+\"\n\nBASE_URL_indeed = 'http://www.indeed.com'\nBASE_URL_monster = 'https://www.monster.com'\nBASE_URL_dice = 'https://www.dice.com'\nBASE_URL_careerbuilder = 'http://www.careerbuilder.com'\n\nclass syntax:\n def __init__(self, input, sign, quote = False):\n self.input = input\n self.sign = sign\n self.quote = quote\n\n def transform(self):\n syntax.output = self.input.replace(\" \", self.sign)\n if self.quote == True:\n syntax.output = ''.join(['\"', syntax.output, '\"'])\n return(syntax.output)\n\ndef basic_careerbuilder(BASE_URL, input_job, input_city, input_state, input_quote, sign_1, sign_2):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30'}\n if input_city != \"\":\n basic_url = [ BASE_URL, '/jobs-',\n syntax(input_job, sign_1, input_quote).transform(), '-in-', input_city,\n ',', input_state]\n basic_url = ''.join(basic_url)\n url_careerbuilder_list = [ basic_url, '?keywords=', syntax(input_job, sign_2, input_quote).transform(),\n '&location=', input_city, '%2C+', input_state ]\n url_careerbuilder = ''.join(url_careerbuilder_list)\n else:\n basic_url = [ BASE_URL, '/jobs-',\n syntax(input_job, sign_1, input_quote).transform(), '-in-',\n input_state]\n basic_url = ''.join(basic_url)\n url_careerbuilder_list = [ basic_url, '?keywords=', syntax(input_job, sign_2, input_quote).transform(),\n '&location=', input_state ]\n url_careerbuilder = ''.join(url_careerbuilder_list)\n\n print(url_careerbuilder)\n try:\n rawcode_careerbuilder = browser.get(url_careerbuilder) # timeout = 3, headers=headers\n #requests.get\n soup_careerbuilder = bs4.BeautifulSoup(browser.page_source, \"lxml\") #rawcode_careerbuilder.text\n except requests.exceptions.Timeout:\n pass\n\n browser.close()\n num_total_careerbuilder = soup_careerbuilder.find(\n 'div', {'class' : 'count'}).contents[0]\n num_total_careerbuilder = int(re.sub('[\\(\\)\\{\\}<>]', '',\n num_total_careerbuilder).split()[0])\n print(num_total_careerbuilder)\n num_pages_careerbuilder = int(numpy.ceil(num_total_careerbuilder/25.0))\n print(num_pages_careerbuilder)\n\n job_df_careerbuilder = pandas.DataFrame()\n for i in range(1, num_pages_careerbuilder+1):\n url = ''.join([basic_url,'?page_number=', str(i)])\n\n rawcode = requests.get(url, headers=headers)\n soup = bs4.BeautifulSoup(rawcode.text, \"lxml\")\n\n divs = soup.findAll(\"div\")\n job_divs = [jp for jp in divs if not jp.get('class') is None\n and 'job-row' in jp.get('class')]\n\n for job in job_divs:\n try:\n id = job.find('h2',{'class' : 'job-title'}).find('a').attrs['data-job-did']\n title = job.find('h2', {'class' : 'job-title'}).text.strip()\n company = job.find('div', {'class' : 'columns large-2 medium-3 small-12'}).find(\n 
'h4', {'class': 'job-text'}).text.strip()\n location = job.find('div', {'class' : 'columns end large-2 medium-3 small-12'}).find(\n 'h4', {'class': 'job-text'}).text.strip()\n link = BASE_URL_careerbuilder + '/job/' + id\n except:\n continue\n\n job_df_careerbuilder = job_df_careerbuilder.append({'job_title': title,\n 'job_id': id,\n 'job_company': company,\n 'from':'Careerbuilder',\n 'job_location':location,\n 'job_link':link},ignore_index=True)\n cols=['from','job_id','job_title','job_company','job_location','job_link']\n job_df_careerbuilder = job_df_careerbuilder[cols] # reorder the columns of dataframe\n job_df_careerbuilder = job_df_careerbuilder.drop_duplicates(['job_link'], keep='first')\n return(job_df_careerbuilder)\n\njob_df_careerbuilder = basic_careerbuilder(BASE_URL_careerbuilder, input_job, input_city, input_state, input_quote,\n sign_1, sign_2)\nprint(job_df_careerbuilder.shape)\n\n##########################################################################\n#################### Job Info that I am interested in ####################\n##########################################################################\n##### Job types #####\ntype = ['Full-time', 'Part-time', 'Contractor', 'Contract', 'Full time', 'Part time']\ntype_lower = [s.lower() for s in type]\ntype_map = pandas.DataFrame({'raw':type, 'lower':type_lower})\ntype_dic = list(type_map.set_index('lower').to_dict().values()).pop()\n\n##### Skills #####\nskills = ['Scala', 'Ruby', 'C++', 'Perl', 'R', 'Java', 'Matlab', 'JavaScript',\n 'Python', 'SPSS', 'D3.js', 'Tableau', 'Excel', 'SAS', 'D3', 'Mahout',\n 'Hadoop', 'Pig', 'Spark', 'ZooKeeper', 'MapReduce', 'Shark', 'Hive',\n 'Oozie', 'Flume', 'HBase', 'Cassandra', 'NoSQL', 'SQL', 'MongoDB', 'GIS',\n 'AWS', 'Haskell', 'PHP', 'Perl', 'Stata', 'Shiny']\nskills_lower = [s.lower() for s in skills]\nskills_map = pandas.DataFrame({'raw':skills, 'lower':skills_lower})\nskills_dic = list(skills_map.set_index('lower').to_dict().values()).pop()\n\n##### Education #####\nedu = ['Bachelor', 'Master', 'PhD', 'MBA', 'M.S.', 'M.S', 'MS', 'Ph.D.', 'BS',\n \"Bachelor's\", \"Master's\", \"PhD's\"]\nedu_lower = [s.lower() for s in edu]\nedu_map = pandas.DataFrame({'raw':edu, 'lower':edu_lower})\nedu_dic = list(edu_map.set_index('lower').to_dict().values()).pop()\n\n##### Major ######\nmajor = ['Computer Science', 'Statistics', 'Mathematics', 'Math','Physics',\n 'Machine Learning','Economics','Software Engineering', 'Engineering',\n 'Information System', 'Quantitative Finance', 'Biostatistics', 'Bioinformatics',\n 'AI', 'Artificial Intelligence']\nmajor_lower = [s.lower() for s in major]\nmajor_map = pandas.DataFrame({'raw':major, 'lower':major_lower})\nmajor_dic = list(major_map.set_index('lower').to_dict().values()).pop()\n\n##### Key Words ######\nkeywords = ['Web Analytics', 'Regression', 'Classification', 'User Experience', 'Big Data',\n 'Streaming Data', 'Real-Time Data', 'Real Time', 'Time Series']\nkeywords_lower = [s.lower() for s in keywords]\nkeywords_map = pandas.DataFrame({'raw':keywords, 'lower':keywords_lower})\nkeywords_dic = list(keywords_map.set_index('lower').to_dict().values()).pop()\n\n###############################################################\n#################### Function for Scraping ####################\n###############################################################\ndef scrape_job(link):\n required_type= []\n required_skills = []\n required_edu = []\n required_major = []\n required_keywords = []\n\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel 
Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30'}\n        job_page = requests.get(link, headers=headers)\n\n        soup = bs4.BeautifulSoup(job_page.text, \"lxml\")\n        for elem in soup.findAll(['script','style','head','title']):\n            elem.extract()\n        texts = soup.getText(separator=' ').lower()\n\n        string = re.sub(r'\\,', ' ', texts) # remove \",\"\n        # print(string.encode('utf-8'))\n        string = re.sub('/', ' ', string) # remove \"/\"\n        # print(string.encode('utf-8'))\n        string = re.sub(r'\\(', ' ', string) # remove \"(\"\n        # print(string.encode('utf-8'))\n        string = re.sub(r'\\)', ' ', string) # remove \")\"\n        # print(string.encode('utf-8'))\n        string = re.sub(r'[\\n\\r\\t]', ' ', string) # remove \"\\n\", \"\\r\", \"\\t\"\n        # print(string.encode('utf-8'))\n        string = re.sub(' +',' ',string) # remove more than one space\n        string = re.sub(r'r\\s&\\sd', ' ', string) # avoid picking 'r & d'\n        string = re.sub(r'r&d', ' ', string) # avoid picking 'r&d'\n        # print(string.encode('utf-8'))\n\n        for typ in type_lower :\n            if any(x in typ for x in ['+', '#', '.']):\n                typp = re.escape(typ)\n            else:\n                typp = typ\n            result = re.search(r'(?:^|(?<=\\s))' + typp + r'(?=\\s|$)', string)\n            if result:\n                required_type.append(typ)\n\n        for sk in skills_lower :\n            if any(x in sk for x in ['+', '#', '.']):\n                skk = re.escape(sk)\n            else:\n                skk = sk\n            result = re.search(r'(?:^|(?<=\\s))' + skk + r'(?=\\s|$)',string)\n            if result:\n                required_skills.append(sk)\n\n        for ed in edu_lower :\n            if any(x in ed for x in ['+', '#', '.']):\n                edd = re.escape(ed)\n            else:\n                edd = ed\n            result = re.search(r'(?:^|(?<=\\s))' + edd + r'(?=\\s|$)', string)\n            if result:\n                required_edu.append(ed)\n\n        for maj in major_lower :\n            if any(x in maj for x in ['+', '#', '.']):\n                majj = re.escape(maj)\n            else:\n                majj = maj\n            result = re.search(r'(?:^|(?<=\\s))' + majj + r'(?=\\s|$)', string)\n            if result:\n                required_major.append(maj)\n\n        for key in keywords_lower :\n            if any(x in key for x in ['+', '#', '.']):\n                keyy = re.escape(key)\n            else:\n                keyy = key\n            result = re.search(r'(?:^|(?<=\\s))' + keyy + r'(?=\\s|$)', string)\n            if result:\n                required_keywords.append(key)\n\n    except:\n        required_type = 'Forbidden'\n        required_skills = 'Forbidden'\n        required_edu = 'Forbidden'\n        required_major = 'Forbidden'\n        required_keywords = 'Forbidden'\n        # continue\n\n    all_job = {'type':required_type, 'skills':required_skills, 'edu':required_edu,\n              'major':required_major, 'keywords':required_keywords}\n    return(all_job)\n\nlks = job_df_careerbuilder['job_link']\nll = [link for link in lks]\n# print(len(ll))\n\nimport multiprocessing as mp\n# print(mp.cpu_count()) #4\nif __name__ == '__main__':\n    pool = mp.Pool(processes = 8)\n    results = pool.map(scrape_job, ll)\n    pool.close()\n    pool.join()\n\n# print(results)\n# print(len(results))\n\njob_type = [d['type'] for d in results]\njob_skills = [d['skills'] for d in results]\njob_edu = [d['edu'] for d in results]\njob_major = [d['major'] for d in results]\njob_keywords = [d['keywords'] for d in results]\n\njob_df_careerbuilder['job_type'] = job_type\njob_df_careerbuilder['job_skills'] = job_skills\njob_df_careerbuilder['job_edu'] = job_edu\njob_df_careerbuilder['job_major'] = job_major\njob_df_careerbuilder['job_keywords'] = job_keywords\nprint(job_df_careerbuilder.shape)\n\nnow = datetime.datetime.now()\nnow_str = now.strftime(\"%m%d%Y\")\ndir_str = '/Users/chou/Google Drive/websites/github/web_scraping/data/' + now_str + '/'\n\nif not os.path.exists(dir_str):\n    os.makedirs(dir_str)\n\njob_df_careerbuilder.to_csv(dir_str 
+ input_job +'_job_df_careerbuilder.csv')\n", "sub_path": "careerbuilder_fast.py", "file_name": "careerbuilder_fast.py", "file_ext": "py", "file_size_in_byte": 11981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 69, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 70, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 86, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 136, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 174, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 176, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 181, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 183, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 185, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 187, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 189, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 191, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 192, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 193, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 198, "usage_type": "call"}, {"api_name": "re.search", "line_number": 201, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 207, "usage_type": "call"}, {"api_name": "re.search", "line_number": 210, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 216, "usage_type": "call"}, {"api_name": "re.search", "line_number": 219, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 225, "usage_type": "call"}, {"api_name": "re.search", "line_number": 228, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 234, "usage_type": "call"}, {"api_name": "re.search", "line_number": 237, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 260, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path", "line_number": 285, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 286, "usage_type": "call"}]}
+{"seq_id": "296929602", "text": "\"\"\"Animation of the trajectories of artists using matplotlib.\nThe Animator class takes a tuple of pairs, each pair corresponds to an\nartist. Each pair (artist) contains the trajectory, as a list, in the\nx and y directions, respectively. Both lists are parametrized using the\nsame variable (typically time).\nThe trajectory followed by the artists is assumed to be in two spatial\ndimensions.\nWith this tuple of tuples the class is capable of showing an animation\nof all of the artists in a frame set using matplotlib's pyplot. This\nclass is very useful when animating physical systems such as planets,\nballs, charges, etc. Specially if you do not want to worry about little\ndetails like axes, titles, labels, etc.\nFor more details on how to use matplotlib to do animations, visit\nhttps://matplotlib.org/api/animation_api.html\nClasses\n-------\nAnimator : Sets, runs or saves animations of a tuple of artists.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nclass Animator:\n \"\"\"Set, run or save animations of artists given their trajectories.\n Attributes\n ----------\n artists : tuple of pairs\n Time step of the integration. It does not only mean time, it is\n just the independent variable of the differential equation.\n art_num : int\n Number of artists. It is the len(self.artists).\n fig : matplotlib.figure.Figure\n Figure that will frame the animation.\n ax : matplotlib.axes._subplots.AxesSubplot\n Axes related to self.fig.\n lines : list of matplotlib.lines.Line2D\n Trajectories or lines to be drawn, one for each artist.\n points : list of matplotlib.lines.Line2D\n The beginning of the trajectory of each artist is represented\n with a point in the Figure. Each pair of lists contains only\n one data in each list.\n time_template : str\n Template that saves the current time of the simulation. It is\n passed over to self.time_text so it can be printed in the\n Figure. It specifies the format in which the time will be printed.\n time_text : matplotlib.text.Text\n Text that will show the current time of the simulation in the\n Figure using the information provided by self.time_template.\n \"\"\"\n\n def __init__(self, objs): # file=None\n \"\"\"Construct an Animator instance given a tuple of artists.\n objs - tuple of pairs to be drawn (artists trajectories).\n \"\"\"\n self.artists = objs\n self.art_num = len(objs)\n self.fig = self.ax = None\n self.lines, self.points = [], []\n self.time_template = self.time_text = None\n\n def setup_anime(self, xmin_off=0, ymin_off=0, xmax_off=0, ymax_off=0):\n \"\"\"Set up the animation.\n xmin_off - offset for the xmin limit calculated below.\n ymin_off - offset for the ymin limit calculated below.\n xmax_off - offset for the xmax limit calculated below.\n ymax_off - offset for the ymax limit calculated below.\n First, it finds out the limits of the Figure, setting up the\n figure, axes, background color of plot, etc.\n Second, sets up the color for the trajectory of each artist and\n appends the plot line to self.lines. 
Then, something similar is\n        done for self.points.\n        Finally, the time_template is defined and the text that will\n        print the current time is set.\n        \"\"\"\n        xtremes = [(min(x), min(y), max(x), max(y)) for x, y in self.artists]\n        xmin = min(map(lambda lst: lst[0], xtremes)) + xmin_off\n        ymin = min(map(lambda lst: lst[1], xtremes)) + ymin_off\n        xmax = max(map(lambda lst: lst[2], xtremes)) + xmax_off\n        ymax = max(map(lambda lst: lst[3], xtremes)) + ymax_off\n        print(\"Xtremes:\", xmin, xmax, ymin, ymax)\n\n        self.fig = plt.figure()\n        self.ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin, ymax),\n                           autoscale_on=False)\n        self.ax.set_facecolor('k')\n        self.ax.set(xlabel='x [a.u.]', ylabel='y [a.u.]',\n                    title='Projectile motion')\n        self.ax.set_aspect('equal')\n        self.ax.grid()\n\n        for a in range(self.art_num):\n            ln, = self.ax.plot([], [], '--')\n            ln.set_clip_on(False)\n            self.lines.append(ln)\n\n        plt.gca().set_prop_cycle(None)\n\n        for a in range(self.art_num):\n            pt, = self.ax.plot([], [], 'o')\n            pt.set_clip_on(False)\n            self.points.append(pt)\n\n        self.time_template = 'time = %d a.u.'\n        self.time_text = self.ax.text(.5, .5, '', color='c',\n                                      transform=self.ax.transAxes,\n                                      horizontalalignment='center',\n                                      verticalalignment='center')\n\n    def init_anime(self):\n        \"\"\"Initialize animation, used to draw a clear frame.\n        It will be passed over to the parameter init_func defined in\n        matplotlib.animation.FuncAnimation.\n        \"\"\"\n        for a in range(self.art_num):\n            self.lines[a].set_data([], [])\n            self.points[a].set_data([], [])\n        self.time_text.set_text('')\n        return self.lines + self.points + [self.time_text]\n\n    def animate(self, idx):\n        \"\"\"Update every artist's line and point for frame idx.\n        idx - argument will be the next value in frames.\n        It will be passed over as the function to call at each frame\n        defined as func in matplotlib.animation.FuncAnimation.\n        \"\"\"\n        for a in range(self.art_num):\n            if idx < len(self.artists[a][0]):\n                xc, yc = self.artists[a][0][idx], self.artists[a][1][idx]\n                self.lines[a].set_data(self.artists[a][0][:idx],\n                                       self.artists[a][1][:idx])\n                self.points[a].set_data(xc, yc)\n        self.time_text.set_text(self.time_template % idx)\n        return self.lines + self.points + [self.time_text]\n\n    def run_anime(self, inval=10, rep=True, blitit=False):\n        \"\"\"Invoke matplotlib.animation.FuncAnimation and display animation.\n        inval - delay between frames in milliseconds (default 10).\n        rep - whether to repeat the animation when it finishes (default True).\n        blitit - controls whether blitting is used to optimize drawing\n        (default False).\n        \"\"\"\n        ani = animation.FuncAnimation(self.fig, self.animate,\n                                      len(self.artists[0][0]), repeat=rep,\n                                      interval=inval, blit=blitit,\n                                      init_func=self.init_anime)\n        plt.show()\n\n    def save_anime(self, filename, inval=10, rep=True, blitit=False):\n        \"\"\"Invoke matplotlib.animation.FuncAnimation and save animation.\n        inval - delay between frames in milliseconds (default 10).\n        rep - whether to repeat the animation when it finishes (default True).\n        blitit - controls whether blitting is used to optimize drawing\n        (default False).\n        Notice that the animation is saved using imagemagick; however,\n        other writers can be used. 
Available writers can be found calling\n        animation.writers.list().\n        \"\"\"\n        print(animation.writers.list())\n        ani = animation.FuncAnimation(self.fig, self.animate,\n                                      len(self.artists[0][0]), repeat=rep,\n                                      interval=inval, blit=blitit,\n                                      init_func=self.init_anime)\n        ani.save(filename, writer='imagemagick', fps=1000 / inval)  # inval is ms per frame; fps wants frames per second\n\n\nif __name__ == \"__main__\":\n    anime = Animator((([0, 2, 4, 6], [-5, 0, 5, 10]),\n                      ([0, 1, 2, 3], [0, -1, -2, -3]),\n                      ([1, 2, 3, 4], [2, 4, 6, 8]),\n                      ([2, 3, 4, 5], [4, 9, 16, 25])))\n    anime.setup_anime()\n    anime.run_anime(inval=1000, rep=True)\n", "sub_path": "animator/animator.py", "file_name": "animator.py", "file_ext": "py", "file_size_in_byte": 7898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.animation.writers.list", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.animation.writers", "line_number": 160, "usage_type": "attribute"}, {"api_name": "matplotlib.animation", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 161, "usage_type": "name"}]}
+{"seq_id": "518050852", "text": "import numpy as np\nfrom scipy import sparse\nfrom abc import ABCMeta, abstractmethod\nfrom nltk.translate.bleu_score import sentence_bleu\n\nfrom utils.graph import Graph, INPUTS_NAME, GRADIENTS_NAME\nfrom neural_network.utils import InvalidShapeError, ModelArchitectureError, onehot_encode\nfrom .layers.core import Layer\nfrom .losses import Loss, CrossEntropyLoss, MeanSquaredLoss\nfrom .optimizers import Optimizer, Adam, SGD\n\n# loss functions\nMEAN_SQUARED = 'mse'\nCROSS_ENTROPY = 'crossentropy'\n\n# optimizers\nADAM = 'adam'\nSGD = 'sgd'\n\n# encoder-decoder input/output names\nENCODER_INPUTS_NAME = 'encoder_inputs'\nENCODER_GRADIENTS_NAME = 'encoder_gradients'\nDECODER_INPUTS_NAME = 'decoder_inputs'\nDECODER_GRADIENTS_NAME = 'decoder_gradients'\n\n# used when models call self.evaluate within self.fit\n# saves on splitting the data into batches again\nINPUT_IN_BATCHES = 'batched'\n\nclass Model(metaclass = ABCMeta):\n \"\"\"Abstract model class.\n \"\"\"\n def __init__(self):\n self._compiled = False\n self._optimizer = None\n self._loss = None \n\n def compile(self, optimizer, loss):\n self.optimizer = optimizer\n self._optimizer = self._get_optimizer(optimizer)\n self.loss = loss\n self._loss = self._get_loss(loss)\n self._compiled = True\n\n @property\n def layers(self): pass\n\n @abstractmethod\n def _define_graph(self, residual_connections = []):\n pass\n\n def _get_optimizer(self, optimizer):\n if isinstance(optimizer, str):\n if optimizer == ADAM:\n return Adam()\n elif optimizer == SGD:\n return SGD()\n raise ValueError(f'Optimizer of type {optimizer} not recognized. '\n f'Choose between Adam optimizer(\\'{ADAM}\\') '\n f'and stochastic gradient descent(\\'{SGD}\\')')\n \n elif isinstance(optimizer, Optimizer):\n return optimizer\n \n else:\n raise ValueError('Invalid optimizer. Please pass an object which inherits '\n 'the Optimizer class, or name of optimizer as string. '\n f'Supported optimizers: ({ADAM}, {SGD}).')\n \n def _get_loss(self, loss):\n if isinstance(loss, str):\n if loss == MEAN_SQUARED:\n return MeanSquaredLoss()\n elif loss == CROSS_ENTROPY:\n return CrossEntropyLoss()\n raise ValueError(f'Loss of type {loss} not recognized. '\n f'Choose between mean squared loss(\\'{MEAN_SQUARED}\\') '\n f'and cross-entropy loss(\\'{CROSS_ENTROPY}\\')')\n \n elif isinstance(loss, Loss):\n return loss\n \n else:\n raise ValueError('Invalid loss function. Please pass an object which inherits the Loss class, '\n 'or name of loss function as string. Supported loss functions: '\n f'({MEAN_SQUARED}, {CROSS_ENTROPY}).')\n\n def _set_names(self):\n nums = dict()\n for layer in self.layers:\n prefix = type(layer).__name__.lower()\n nums[prefix] = nums[prefix] + 1 if prefix in nums else 1\n \n layer._name = f'{prefix}_{nums[prefix]}'\n\nclass Sequential(Model):\n \"\"\"Linear stack of layers.\n \"\"\"\n def __init__(self, layers):\n super().__init__()\n if any([not isinstance(layer, Layer) for layer in layers]):\n raise TypeError('The added layer must be an instance of class Layer.')\n self.__layers = list(layers)\n \n self._set_names()\n self._define_graph()\n\n @property\n def layers(self): return self.__layers\n \n def fit(self, X, y, batch_size = 32, epochs = 1, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. 
'\n 'Use `model.compile(optimizer, loss)`.')\n \n n_samples = X.shape[0]\n self.__labels = list(set(y))\n y_onehot = onehot_encode(y) if len(self.__labels) > 2 else y.reshape(-1, 1)\n X_batches = [X[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n y_batches = [y_onehot[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n \n for it in range(epochs):\n for batch_index in range(len(X_batches)):\n y_batch = y_batches[batch_index]\n\n # y_batch would be a sparse matrix if number of labels > 2 and y was onehot encoded\n if isinstance(y_batch, sparse.csr_matrix):\n y_batch = y_batch.toarray()\n\n self._forward(X_batches[batch_index], train_mode = True)\n self._backward(X_batches[batch_index], y_batch, train_mode = True)\n \n print(f'Epoch {it + 1}:')\n loss, accuracy = self.evaluate(X_batches, y_batches, batch_size, **{INPUT_IN_BATCHES: True})\n print(f'Training loss: {loss}, training accuracy: {accuracy}')\n\n self._optimizer.decay_lr()\n\n def evaluate(self, X, y, batch_size = 32, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. '\n 'Use `model.compile(optimizer, loss)`.')\n\n if INPUT_IN_BATCHES in kwargs and kwargs[INPUT_IN_BATCHES] == True:\n X_batches = X\n y_oh_batches = y\n\n # y_batches elements would be sparse matrices if number of labels > 2 and y was onehot encoded\n if isinstance(y_oh_batches[0], sparse.csr_matrix):\n y_batches = [oh_batch.toarray().argmax(axis=1).reshape(-1, 1) for oh_batch in y_oh_batches]\n else:\n y_batches = y\n else:\n n_samples = X.shape[0]\n X_batches = [X[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n y_batches = [y[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n if len(self.__labels) > 2:\n y_oh_batches = [onehot_encode(batch, num_labels=len(self.__labels)) for batch in y_batches]\n else:\n y_oh_batches = y_batches\n\n loss = 0\n accuracy = 0\n n_batches = len(X_batches)\n \n for batch_index in range(n_batches):\n X_batch = X_batches[batch_index]\n y_batch = y_batches[batch_index].reshape(-1, 1)\n\n onehot_batch = y_oh_batches[batch_index]\n\n # onehot_batch would be a sparse matrix if number of labels > 2 and y was onehot encoded\n if isinstance(onehot_batch, sparse.csr_matrix):\n onehot_batch = onehot_batch.toarray()\n\n self._forward(X_batch, train_mode = False)\n activations = self.layers[-1].activations\n \n # if there is more than one activation per sample, then the labels were onehot encoded \n if self.layers[-1].activations.shape[-1] == 1:\n y_batch = y_batch\n current_loss = self._loss.get_loss(y_batch, activations)\n predictions = np.array([self.__labels[int(np.round(activation))] for activation in activations]).reshape((activations.shape[0], -1))\n else:\n current_loss = self._loss.get_loss(onehot_batch, activations)\n predictions = np.array([self.__labels[np.argmax(activation)] for activation in activations]).reshape((activations.shape[0], -1))\n loss += current_loss\n diff = y_batch - predictions\n accuracy += 1 - (np.count_nonzero(diff) / len(y_batch))\n\n loss /= n_batches\n accuracy /= n_batches\n return loss, accuracy\n \n def _forward(self, X_batch, train_mode = True, *args, **kwargs):\n self._graph.forward(inputs_dict={INPUTS_NAME: X_batch},\n additional_params={'train_mode': train_mode},\n *args,\n **kwargs)\n\n outputs = self._graph.outputs\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n \n def _backward(self, X_batch, y_batch, train_mode = True, *args, **kwargs):\n delta = self._loss.output_deriv(y = 
self.layers[-1].activations, t = y_batch)\n\n self._graph.backward(gradients_dict={GRADIENTS_NAME: delta},\n additional_params = {'train_mode': train_mode},\n node_func=self._optimizer.update_weights,\n *args,\n **kwargs)\n\n def _define_graph(self, residual_connections = []):\n self.layers[0].input_edges[INPUTS_NAME] = None\n self.layers[-1].output_edges[GRADIENTS_NAME] = None\n node_connections = [(self.layers[idx], self.layers[idx + 1], (INPUTS_NAME, GRADIENTS_NAME)) for idx in range(0, len(self.layers) - 1)]\n node_connections += residual_connections\n\n self._graph = Graph(node_connections)\n\nclass EncoderDecoder(Model):\n def __init__(self,\n encoder_layers,\n decoder_layers,\n link_layers,\n start_of_sequence_token_id,\n end_of_sequence_token_id,\n padding_token_id = 0):\n \"\"\"Abstract encoder-decoder architecture for sequence models.\n\n Parameters: \n 'encoder_layers' - a list of layers that will comprise the encoder\n 'decoder_layers' - a list of layers that will comprise the decoder\n 'link_layers' - a list of layers linking the encoder and the decoder\n 'start_of_sequence_token_id' - the id of the start of sequence token used\n 'end_of_sequence_token_id' - the id of the end of sequence token used\n 'padding_token_id' - id of the sequence padding token used\n\n Warning: All layers in 'link_layers' must be a part of the 'decoder_layers' list and must be able to accept two inputs (from the decoder and the encoder).\n \"\"\"\n super().__init__()\n\n if any([ll not in decoder_layers for ll in link_layers]):\n raise ModelArchitectureError('\\'link_layers\\' must be a part of the \\'decoder_layers\\' list.')\n\n self.encoder_layers = encoder_layers\n self.decoder_layers = decoder_layers\n self.link_layers = link_layers\n self.start_of_sequence_token_id = start_of_sequence_token_id\n self.end_of_sequence_token_id = end_of_sequence_token_id\n self.padding_token_id = padding_token_id\n\n self._set_names()\n self._define_graph()\n\n @property\n def layers(self): return self.encoder_layers + self.decoder_layers\n\n def fit(self, encoder_inputs, decoder_inputs, batch_size = 32, epochs = 1, *args, **kwargs):\n if not self._compiled:\n raise RuntimeError('You must compile a model before training/testing. 
'\n                               'Use `model.compile(optimizer, loss)`.')\n\n        ## merge batch_size and sequence length dimensions into one\n        #encoder_inputs_flat = encoder_inputs.ravel()#encoder_inputs.reshape((sum(encoder_inputs.shape[:2]),) + encoder_inputs.shape[2:])\n        #decoder_inputs_flat = decoder_inputs.ravel()#decoder_inputs.reshape((sum(decoder_inputs.shape[:2]),) + decoder_inputs.shape[2:])\n        ##y_reshaped = y.reshape((-1,))\n        n_samples = encoder_inputs.shape[0]\n\n        self.__labels = list([0]*3459)  # FIXME: hard-coded vocabulary size; originally list(set(decoder_inputs))\n\n        encoder_batches = [encoder_inputs[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n        decoder_batches = [decoder_inputs[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n        #y_batches = [y_reshaped[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n\n        for it in range(epochs):\n            for batch_index in range(len(encoder_batches)):\n                self._forward(encoder_inputs = encoder_batches[batch_index],\n                              decoder_inputs = decoder_batches[batch_index],\n                              train_mode=True)\n                self._backward(encoder_inputs = encoder_batches[batch_index],\n                               decoder_inputs = decoder_batches[batch_index],\n                               y_batch = onehot_encode(decoder_batches[batch_index],\n                                                       num_labels=len(self.__labels))\\\n                                         .toarray()\\\n                                         .reshape(decoder_batches[batch_index].shape + (len(self.__labels),)),\n                               train_mode = True)\n\n            print(f'Epoch {it + 1}:')\n            loss, bleu = self.evaluate(encoder_inputs=encoder_batches, y=decoder_batches, **{INPUT_IN_BATCHES: True})\n            print(f'Training loss: {loss}, training BLEU score: {bleu}')\n\n            self._optimizer.decay_lr()\n\n    def evaluate(self, encoder_inputs, y, batch_size = 32, epochs = 1, *args, **kwargs):\n        if not self._compiled:\n            raise RuntimeError('You must compile a model before training/testing. '\n                               'Use `model.compile(optimizer, loss)`.')\n\n        if INPUT_IN_BATCHES in kwargs and kwargs[INPUT_IN_BATCHES] == True:\n            encoder_batches = encoder_inputs\n            y_batches = y\n        else:\n            ## merge batch_size and sequence length dimensions into one\n            #encoder_inputs = encoder_inputs.ravel()\n            n_samples = encoder_inputs.shape[0]\n\n            encoder_batches = [encoder_inputs[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n            y_reshaped = y.reshape((-1,))\n            y_batches = [y_reshaped[i:i + batch_size] for i in range(0, n_samples, batch_size)]\n\n        loss = 0\n        bleu_score = 0\n        n_batches = len(encoder_batches)\n\n        for batch_index in range(n_batches):\n            encoder_batch = encoder_batches[batch_index]\n            y_batch = y_batches[batch_index]\n            onehot_batch = onehot_encode(y_batch, num_labels=len(self.__labels)).toarray()\n\n            self._graph.clear_messages()\n            self._forward(encoder_inputs=encoder_batch, decoder_inputs=y_batch, train_mode = False)\n            activations = self.decoder_layers[-1].activations\n\n            if self.layers[-1].activations.shape[-1] == 1:\n                current_loss = self._loss.get_loss(y_batch.reshape(-1, 1), activations)\n                hypotheses = np.array([self.__labels[int(np.round(activation))] for activation in activations])\n            else:\n                current_loss = self._loss.get_loss(onehot_batch, activations)\n                hypotheses = np.array([self.__labels[np.argmax(sentence)] for activation in activations for sentence in activation]).reshape(activations.shape[:-1])\n            loss += current_loss\n            bleu_score += np.mean([sentence_bleu([y_batch[idx]], hypotheses[idx]) for idx in range(len(y_batch))])\n\n\n        loss /= n_batches\n        bleu_score /= n_batches\n        return loss, bleu_score\n\n    def _forward(self, encoder_inputs, decoder_inputs, train_mode = True, *args, **kwargs):\n        if train_mode:\n            self._graph.forward(inputs_dict={ENCODER_INPUTS_NAME: encoder_inputs, 
DECODER_INPUTS_NAME: decoder_inputs},\n                                additional_params={'train_mode': train_mode})\n\n            return self._graph.outputs[0]\n        else:\n            outputs_all = []\n            for sequence_index in range(encoder_inputs.shape[0]):\n                sequence_encoder_inputs = encoder_inputs[sequence_index][None]\n\n                decoder_inputs = np.hstack([np.array([[self.start_of_sequence_token_id]]), np.ones((1, encoder_inputs.shape[1] - 1)) * self.padding_token_id])\n\n                self._graph.forward(inputs_dict={ENCODER_INPUTS_NAME: sequence_encoder_inputs, DECODER_INPUTS_NAME: decoder_inputs},\n                                    additional_params={'train_mode': train_mode})\n\n                # \"freeze\" encoder layers so the graph skips them in the forward iteration as they have already done their computations\n                for layer in self.encoder_layers:\n                    layer.frozen = True\n\n                output_index = 1\n                outputs = self._graph.outputs[0]#.reshape(sequence_encoder_inputs.shape + (self._graph.outputs[0].shape[-1],))\n\n                # recompute decoder outputs until it predicts an end-of-sequence token at element output_index\n                # each time adding the output at index output_index to the decoder input\n                while outputs[0][output_index].argmax() != self.end_of_sequence_token_id and output_index < encoder_inputs.shape[1] - 1:\n                    decoder_inputs[0, output_index] = outputs[0][output_index].argmax()\n\n                    # clear messages from decoder layers as they will have to be recomputed\n                    for layer in self.decoder_layers:\n                        layer.clear_child_edges()\n\n                    self._graph.forward(inputs_dict={DECODER_INPUTS_NAME: decoder_inputs},\n                                        additional_params={'train_mode': train_mode})\n                    # refresh the cached outputs so the loop condition sees the newly predicted token\n                    outputs = self._graph.outputs[0]\n                    output_index += 1\n\n                outputs_all.append(self._graph.outputs[0])\n\n                # \"unfreeze\" encoder layers (see reasons for freezing above)\n                for layer in self.encoder_layers:\n                    layer.frozen = False\n\n            self._graph.clear_messages()\n\n            return np.array([out[0] for out in outputs_all])\n\n    def _backward(self, encoder_inputs, decoder_inputs, y_batch, train_mode = True, *args, **kwargs):\n        activations = self.decoder_layers[-1].activations\n        delta = self._loss.output_deriv(y = activations, t = y_batch)\n\n        self._graph.backward(gradients_dict={GRADIENTS_NAME: delta},\n                             additional_params = {ENCODER_INPUTS_NAME: encoder_inputs,\n                                                  DECODER_INPUTS_NAME: decoder_inputs,\n                                                  'train_mode': train_mode},\n                             node_func=self._optimizer.update_weights,\n                             *args,\n                             **kwargs)\n\n    def _define_graph(self, residual_connections = []):\n        self.encoder_layers[0].input_edges[ENCODER_INPUTS_NAME] = None\n        self.decoder_layers[0].input_edges[DECODER_INPUTS_NAME] = None\n        self.decoder_layers[-1].output_edges[GRADIENTS_NAME] = None\n\n        enco_conn = [(self.encoder_layers[idx], self.encoder_layers[idx + 1], (INPUTS_NAME, GRADIENTS_NAME)) for idx in range(0, len(self.encoder_layers) - 1)]\n        deco_conn = [(self.decoder_layers[idx], self.decoder_layers[idx + 1], (INPUTS_NAME, GRADIENTS_NAME)) for idx in range(0, len(self.decoder_layers) - 1)]\n        link_conn = [(self.encoder_layers[-1], link, (ENCODER_INPUTS_NAME, GRADIENTS_NAME)) for link in self.link_layers]\n        node_connections = enco_conn + deco_conn + link_conn + residual_connections\n\n        self._graph = Graph(node_connections)\n\n", "sub_path": "neural_network/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 19457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "optimizers.SGD", "line_number": 18, "usage_type": "name"}, {"api_name": "abc.ABCMeta", "line_number": 30, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 48, "usage_type": "name"}, {"api_name": 
"optimizers.Adam", "line_number": 55, "usage_type": "call"}, {"api_name": "optimizers.SGD", "line_number": 56, "usage_type": "name"}, {"api_name": "optimizers.SGD", "line_number": 57, "usage_type": "call"}, {"api_name": "optimizers.SGD", "line_number": 60, "usage_type": "name"}, {"api_name": "optimizers.Optimizer", "line_number": 62, "usage_type": "argument"}, {"api_name": "optimizers.SGD", "line_number": 68, "usage_type": "name"}, {"api_name": "losses.MeanSquaredLoss", "line_number": 73, "usage_type": "call"}, {"api_name": "losses.CrossEntropyLoss", "line_number": 75, "usage_type": "call"}, {"api_name": "losses.Loss", "line_number": 80, "usage_type": "argument"}, {"api_name": "layers.core.Layer", "line_number": 101, "usage_type": "argument"}, {"api_name": "layers.core", "line_number": 101, "usage_type": "name"}, {"api_name": "layers.core", "line_number": 103, "usage_type": "argument"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 118, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 127, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 127, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 149, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 149, "usage_type": "name"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 158, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 173, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 196, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 210, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 217, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 218, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 219, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 219, "usage_type": "name"}, {"api_name": "utils.graph.Graph", "line_number": 222, "usage_type": "call"}, {"api_name": "neural_network.utils.ModelArchitectureError", "line_number": 247, "usage_type": "call"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 286, "usage_type": "call"}, {"api_name": "neural_network.utils.onehot_encode", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 335, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.sentence_bleu", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 386, "usage_type": "call"}, 
{"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 392, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 403, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 405, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 405, "usage_type": "name"}, {"api_name": "utils.graph.INPUTS_NAME", "line_number": 406, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 406, "usage_type": "name"}, {"api_name": "utils.graph.GRADIENTS_NAME", "line_number": 407, "usage_type": "name"}, {"api_name": "utils.graph.Graph", "line_number": 410, "usage_type": "call"}]}
+{"seq_id": "14428372", "text": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nsys.path.append('..')\nfrom models import *\nfrom utils import Logger\nimport numpy as np\nimport progressbar\nimport time\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch CIFAR model Train')\nparser.add_argument('--batch-size', type=int, default=256, metavar='N',\n                    help='input batch size for training (default: 256)')\nparser.add_argument('--test-batch-size', type=int, default=512, metavar='N',\n                    help='input batch size for testing (default: 512)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N',\n                    help='number of epochs to train (default: 100)')\nparser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n                    help='learning rate (default: 0.1)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n                    help='SGD momentum (default: 0.9)')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n                    metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n                    help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n                    help='random seed (default: 1)')\nparser.add_argument('--alpha', type=float, default=2.0, help='Orthogonality of the weight')  # 2.0-7.5\nparser.add_argument('--delta', type=float, default=0.1, help='change of args of weight')  # 0.08 0.1 0.12\n# delta 0.08-4 0.1-6 0.12-7\nargs = parser.parse_args()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n    torch.cuda.manual_seed(args.seed)\nelse:\n    print(\"No cuda participate.\")\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntest_loader = torch.utils.data.DataLoader(\n    datasets.CIFAR10('../data', train=False, transform=transforms.Compose([transforms.ToTensor(),])),\n    batch_size=args.test_batch_size, shuffle=False, **kwargs)\n\n\nmodel = Lsoftmax_VGG16(margin=1)\nif args.cuda:\n    model.cuda()\nfilename = '../checkpoint/vgg16/vgg16_lsoftmax.pth'\ncheckpoint = torch.load(filename)\nmodel.load_state_dict(checkpoint)\n\nwith torch.no_grad():\n    for name, parameters in model.named_parameters():\n        print(name, ':', parameters.size())\n        if name == 'lsoftmax_linear.weight':\n            weight = parameters.cpu().numpy()\n\n\ndef inference():\n    model.eval()\n    test_loss = 0\n    correct = 0\n    with torch.no_grad():\n        # bar = progressbar.ProgressBar(max_value=10000//args.test_batch_size + 1)\n        for data, target in test_loader:\n            if args.cuda:\n                data, target = data.cuda(), target.cuda()\n            data, target = Variable(data), Variable(target)\n            _, output = model(data)\n            test_loss += F.cross_entropy(output, target, reduction='sum').item()\n            pred = output.data.max(1, keepdim=True)[1]\n            correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n            # bar.update(bi)\n        # bar.finish()\n    test_loss /= len(test_loader.dataset)\n    print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n        test_loss, correct, len(test_loader.dataset),\n        100. * float(correct) / len(test_loader.dataset)))\n    print(f'weight_size: {weight.shape}')\n    np.save('../weight_vector/vgg16/weight_lsoftmax', weight)\n\nif __name__ == '__main__':\n    inference()", "sub_path": "lab_vgg16/get_weight.py", "file_name": "get_weight.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 54, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 54, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 92, "usage_type": "call"}]}
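An illustrative follow-up, not part of the original script: the matrix dumped by np.save above can be reloaded to inspect the angles between class weight vectors, which is what the L-Softmax margin is meant to enlarge. The path mirrors the script, and a (num_classes, feature_dim) layout is assumed:

import numpy as np

w = np.load('../weight_vector/vgg16/weight_lsoftmax.npy')   # file written above
w_unit = w / np.linalg.norm(w, axis=1, keepdims=True)       # row-normalize
cosines = w_unit @ w_unit.T                                 # pairwise cos(theta)
angles = np.degrees(np.arccos(np.clip(cosines, -1.0, 1.0)))
print(angles.round(1))                                      # inter-class angles in degrees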
+{"seq_id": "35034945", "text": "#!/usr/bin/env python3\nfrom unittest import TestCase, mock\nimport pytest\nimport mailroom_v4\nimport unittest\nfrom io import StringIO\nfrom testfixtures import tempdir, compare\nimport os\n\n\n@pytest.mark.parametrize('name, amount, expected', [\n    (\"dan\", '50', \"Thank you dan for donating 50 dollars generously.\"),\n    (\"jeff\", '60', \"Thank you jeff for donating 60 dollars generously.\")\n])\ndef test_thank_you_letter_positive(name, amount, expected):\n    result = str(mailroom_v4.thank_you_letter(name, amount))\n    assert expected == result\n\n\n@pytest.mark.parametrize('name, amount, expected', [\n    (\"dan\", 50, \"Thank you sam for donating 50 dollars generously.\"),\n    (\"jeff\", 60, \"Thank you for donating 60 dollars generously.\")\n])\ndef test_thank_you_letter_negative(name, amount, expected):\n    result = str(mailroom_v4.thank_you_letter(name, amount))\n    assert expected != result\n\n\ntesting_donors_data = {\"testname1\": [200, 20, 35.5],\n                       \"testname2\": [500, 20],\n                       \"Susan\": [1000, 20, 70],\n                       \"Rob\": [250, 20],\n                       }\n\n\n@unittest.mock.patch('mailroom_v4.donor_details')\ndef test_donor_details(mock_donor_details):\n    mailroom_v4.donor_details(testing_donors_data)\n    mock_donor_details.assert_called_with(testing_donors_data)\n\n\ndef test_amount_validate_positive():\n    assert mailroom_v4.amount_validate(float(20))\n\n\n@unittest.mock.patch('mailroom_v4.amount_validate')\ndef test_amount_validate_negative(mock_amount_validate):\n    mailroom_v4.amount_validate(-10)\n    mock_amount_validate.assert_called_with(-10)\n\n\n@unittest.mock.patch('mailroom_v4.update_data_print_thanks')\ndef test_thank_you(mock_update_data_print_thanks):\n    mailroom_v4.update_data_print_thanks(float(10), \"name1\")\n    assert mock_update_data_print_thanks.called\n\n\n@unittest.mock.patch('sys.stdout', new_callable=StringIO)\ndef test_create_report(mock_stdout):\n    mailroom_v4.create_report()\n    assert mock_stdout.getvalue() == '''Donor Name | Total Given |Num Gifts | Aver\n------------------------------------------------------------------------------------------\nJohn $ 255.5 3 $ 85.17\nJeff $ 520 2 $ 260.0\nSusan $ 1090 3 $ 363.3\nRob $ 270 2 $ 135.0\nRoss $ 200 1 $ 200.0\\n'''\n\n\n@unittest.mock.patch('mailroom_v4.send_letters_all')\ndef test_send_letters_all_call(mock_send_letters_all_call):\n    test_send_donors_data = {\"testname3\": [200, 20, 35.5],\n                             \"testname2\": [500, 20],\n                             \"Susan\": [1000, 20, 70],\n                             \"Rob\": [250, 20],\n                             }\n    mailroom_v4.send_letters_all(**test_send_donors_data)\n    assert mock_send_letters_all_call.called\n    # compare(dir.read('Susan.txt'), b'some thing')\n\n\ntest_send_letters_all_call()\n\n\n# @unittest.mock.patch('mailroom_v4.send_letters_all')\n# def test_send_letters_all(mock_test_send_letters_all_call):\n#     test_send_donors_data = {\"testname3\": [200, 20, 35.5],\n#                              \"testname2\": [500, 20],\n#                              \"Susan\": [1000, 20, 70],\n#                              \"Rob\": [250, 20],\n#                              }\n#     print(mock_test_send_letters_all_call(**test_send_donors_data))\n#     print(mailroom_v4.send_letters_all(**test_send_donors_data))\n#\n#     assert os.path.isfile(\"testname3.txt\") == 1\n\n\n\n# test_send_letters_all\n", "sub_path": "students/g_rama/lesson06/test_mailroom_v4.py", "file_name": "test_mailroom_v4.py", "file_ext": "py", "file_size_in_byte": 3661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "mailroom_v4.thank_you_letter", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, 
"usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mailroom_v4.thank_you_letter", "line_number": 25, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "mailroom_v4.donor_details", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 36, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 36, "usage_type": "attribute"}, {"api_name": "mailroom_v4.amount_validate", "line_number": 43, "usage_type": "call"}, {"api_name": "mailroom_v4.amount_validate", "line_number": 48, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 46, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 46, "usage_type": "attribute"}, {"api_name": "mailroom_v4.update_data_print_thanks", "line_number": 54, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 52, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mailroom_v4.create_report", "line_number": 60, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 58, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 58, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 58, "usage_type": "name"}, {"api_name": "mailroom_v4.send_letters_all", "line_number": 77, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 70, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 70, "usage_type": "attribute"}]}
+{"seq_id": "45340386", "text": "import unittest\nimport gzip\n\nfrom pkg_resources import resource_filename\nfrom testtools import TestCase\nfrom testtools.matchers import *\n\nfrom propertysuggester.test.parser.test_abstract_reader import AbstractUniverseTest\nfrom propertysuggester.parser import XmlReader\nfrom propertysuggester.utils.datamodel import Claim, Snak, Entity\n\nclass XmlReaderTest(AbstractUniverseTest):\n    def test_universe(self):\n        with gzip.open(resource_filename(__name__, \"Wikidata-Q1.xml.gz\"), \"r\") as f:\n            result = list(XmlReader.read_xml(f))\n        self.assert_universe(result)\n\n    def test_updated_dump(self):\n        with gzip.open(resource_filename(__name__, \"Wikidata-Q9351.xml.gz\"), \"r\") as f:\n            result = list(XmlReader.read_xml(f))\n\n        self.assertThat(len(result), Equals(1))\n        q9351 = result[0]\n        self.assertThat(q9351.title, Equals(\"Q9351\"))\n        self.assertThat(q9351.claims, Contains(Claim(Snak(156, \"wikibase-item\", \"Q1647331\"))))\n        self.assertThat(q9351.claims, Contains(Claim(Snak(1112, \"quantity\", \"+25\"))))\n\n    def test_special_cases(self):\n        self.assertThat(XmlReader._process_json((\"Q1\", \"{}\")), Equals(Entity(\"Q1\", [])))\n        self.assertThat(XmlReader._process_json((\"Q1\", '{\"claims\":[{\"m\":[\"value\",\"\",\"bad\"], \"refs\":[],\"q\":[]}]}')),\n                        Equals(Entity(\"Q1\", [])))\n        self.assertThat(XmlReader._process_json((\"Q1\", '{\"claims\":[{\"m\":[\"value\",\"\",\"unknown\"], \"refs\":[],\"q\":[]}]}')),\n                        Equals(Entity(\"Q1\", [])))\n\nclass MultiprocessingBigTest(TestCase):\n    def test_simple_multiprocessing(self):\n        r1 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-Q1.xml.gz\")), 1))\n        r4 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-Q1.xml.gz\")), 4))\n\n        self.assertThat(r1, HasLength(1))\n        self.assertThat(r4, Equals(r1))\n\n    def test_multiprocessing(self):\n        r1 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-20131129161111.xml.gz\")), 1))\n        r4 = list(XmlReader.read_xml(gzip.open(resource_filename(__name__, \"Wikidata-20131129161111.xml.gz\")), 4))\n\n        self.assertThat(r1, HasLength(87))\n        self.assertThat(r4, Equals(r1))\n\nif __name__ == '__main__':\n    unittest.main()\n\n", "sub_path": "propertysuggester/test/parser/test_xml_reader.py", "file_name": "test_xml_reader.py", "file_ext": "py", "file_size_in_byte": 2293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "propertysuggester.test.parser.test_abstract_reader.AbstractUniverseTest", "line_number": 12, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 14, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 14, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 15, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 15, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 19, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 19, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 20, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 20, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Claim", "line_number": 25, "usage_type": "call"}, {"api_name": "propertysuggester.utils.datamodel.Snak", "line_number": 25, "usage_type": "call"}, {"api_name": 
"propertysuggester.utils.datamodel.Claim", "line_number": 26, "usage_type": "call"}, {"api_name": "propertysuggester.utils.datamodel.Snak", "line_number": 26, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader._process_json", "line_number": 29, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 29, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Entity", "line_number": 29, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader._process_json", "line_number": 30, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 30, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Entity", "line_number": 31, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader._process_json", "line_number": 32, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 32, "usage_type": "name"}, {"api_name": "propertysuggester.utils.datamodel.Entity", "line_number": 33, "usage_type": "call"}, {"api_name": "testtools.TestCase", "line_number": 35, "usage_type": "name"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 37, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 37, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 37, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 37, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 38, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 38, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 38, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 38, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 44, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 44, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 44, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 44, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader.read_xml", "line_number": 45, "usage_type": "call"}, {"api_name": "propertysuggester.parser.XmlReader", "line_number": 45, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 45, "usage_type": "call"}, {"api_name": "pkg_resources.resource_filename", "line_number": 45, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "610330026", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 19 11:33:08 2020\n\n@author: ramravi\n\"\"\"\n\n# importing the necessary libraries\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom pandas import plotting\n\n# for visualization\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\n\n# for interactive visualizations\nimport plotly.offline as py\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\nfrom plotly import tools\ninit_notebook_mode(connected=True)\nimport plotly.figure_factory as ff\n\n# importing the dataset\ndata = pd.read_csv('mallcustomersegmentation.csv')\n\ndat = ff.create_table(data.head())\n\npy.iplot(dat)\n\n\ndata.describe()\n\n# checking if there is null data\ndata.isnull().any().any()\n\n# plotting the Andrews curves\nplt.rcParams['figure.figsize'] = (15, 10)\n\nplotting.andrews_curves(data.drop('CustomerID', axis=1), 'Gender')\nplt.title('Andrews curves for gender', fontsize=20)\nplt.show()\n\n# The Andrews curves preserve the means, the distances (up to a constant) and the variances.\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nplt.rcParams['figure.figsize'] = (18, 8)\n\nplt.subplot(1, 2, 1)\nsns.set(style='whitegrid')\nsns.distplot(data['Annual Income (k$)'])\nplt.title('Annual income distribution', fontsize=20)\nplt.xlabel('Range of Annual Income')\nplt.ylabel('Count')\n\nplt.subplot(1, 2, 2)\nsns.set(style='whitegrid')\nsns.distplot(data['Age'], color='red')\nplt.title('Distribution of Age', fontsize=20)\nplt.xlabel('Range of age')\nplt.ylabel('count')\n\n# We can infer that only a few people earn more than 100 US dollars, most people earn around\n# 50-75 US dollars, and the lowest income is around 20 US dollars.\n\n# Taking inferences about the customers:\n\n# The most regular customers of the mall are around 30-35 years of age, whereas senior citizens\n# are the least frequent visitors. Youngsters are fewer in number compared to middle-aged people.\n\n\nlabels = ['Female', 'Male']\nsize = data['Gender'].value_counts()\ncolors = ['lightgreen', 'orange']\nexplode = [0, 0.1]\n\nplt.rcParams['figure.figsize'] = (9, 9)\nplt.pie(size, explode=explode, labels=labels, autopct='%.2f%%', shadow=True)\nplt.title('Gender Pie distribution')\nplt.axis('off')\nplt.legend()\nplt.show()\n\n# The pie chart makes it clear that women dominate the visitor count with about 56%,\n# a sizeable gap, especially since the general population has comparatively more males.\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.countplot(data['Age'], palette='hsv')\nplt.title('Distribution of Age', fontsize=20)\nplt.show()\n\n# This chart shows the distribution of each age group in the mall in more detail.\n# Ages 27 to 39 are very frequent, but there is no clear pattern. Interestingly, there are\n# equal numbers of visitors of ages 18 and 67, people of ages 55, 56, 64 and 69 visit rarely,\n# and people aged 32 are the most frequent visitors.\n\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.countplot(data['Annual Income (k$)'], palette='hsv')\nplt.title('Distribution of Annual Income', fontsize=25)\nplt.show()\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.countplot(data['Spending Score (1-100)'], palette='copper')\nplt.title('Distribution of Spending score', fontsize=25)\nplt.show()\n\n# This is the most important chart of all.\n# It shows that the mall has a wide variety of customers, with spending scores ranging from 1\n# to 99, so the mall caters to different classes of people. However, most customers' spending\n# scores lie between 35 and 60.\n\nsns.pairplot(data)\nplt.title('Pairplot for the data', fontsize=20)\nplt.show()\n\n# This shows the relationship of each feature variable with itself and with the other variables\n# in the table. It helps in finding hidden relationships between the chosen (target) variable\n# and the other important features.\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.heatmap(data.corr(), cmap='Wistia', annot=True)\nplt.title('Correlation matrix')\nplt.show()\n\n# As the matrix shows, the features do not correlate strongly with each other,\n# so we proceed with all of them.\n\n# Bi-variate analysis\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.boxenplot('Gender', 'Spending Score (1-100)', data=data, palette='Blues')\nplt.title('Bi-variate analysis of gender and spending score')\nplt.show()\n\n\n# The spending scores of males mostly range from about 25 to 70, whereas those of females range\n# from about 35 to 75 - a clear domination of the female gender in the shopping arena!\n\nplt.rcParams['figure.figsize'] = (15, 8)\nsns.boxplot('Gender', 'Annual Income (k$)', data=data, palette='rainbow')\nplt.title('Bivariate analysis Gender vs Annual Income', fontsize=20)\nplt.show()\n\n# Males have a higher average income than females, while at the lower end of the income range\n# both genders are almost equal.\n\nx = data['Annual Income (k$)']\ny = data['Age']\nz = data['Spending Score (1-100)']\n\nsns.lineplot(x, y, color='blue')\nsns.lineplot(x, z, color='pink')\nplt.title('Multivariate analysis of age vs annual income vs spending score')\nplt.show()\n\n# The chart above shows the relationship between age and annual income, and between annual\n# income and spending score.\n\n# Clustering analysis\nx = data.iloc[:, [3, 4]].values\n\n\n# K-means algorithm\n\n# elbow method to find the optimum number of clusters\nfrom sklearn.cluster import KMeans\nwcss = []\nfor i in range(1, 11):\n    km = KMeans(n_clusters=i, init='k-means++', max_iter=300,\n                n_init=10, random_state=0)\n    km.fit(x)\n    wcss.append(km.inertia_)\n\nplt.plot(range(1, 11), wcss)\nplt.title('The elbow method', fontsize=20)\nplt.xlabel('No of clusters')\nplt.ylabel('wcss')\nplt.show()\n\n# visualizing the clusters\nkm = KMeans(n_clusters=5, init='k-means++', max_iter=300,\n            n_init=10, random_state=0)\ny_means = km.fit_predict(x)\n\nplt.scatter(x[y_means == 0, 0], x[y_means == 0, 1],\n            s=100, c='pink', label='miser')\nplt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s=100, c='yellow',\n            label='general')\nplt.scatter(x[y_means == 2, 0], x[y_means == 2, 1], s=100, c='cyan',\n            label='target')\nplt.scatter(x[y_means == 3, 0], x[y_means == 3, 1], s=100, c='magenta',\n            label='spendthrift')\nplt.scatter(x[y_means == 4, 0], x[y_means == 4, 1], s=100, c='orange',\n            label='careful')\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=50, c='blue', label='centroid')\n\n\nplt.style.use('fivethirtyeight')\nplt.title('K-means Clustering', fontsize=20)\nplt.xlabel('Annual Income')\nplt.ylabel('Spending score')\nplt.legend()\nplt.grid()\nplt.show()\n\n\n# There are five customer segments in the mall, briefly described by the labels above. The mall\n# authorities should offer the 'careful' category some benefits so that they move into the\n# 'general' category.\n\n\n# Hierarchical Clustering\n\n# using dendrograms\n\nimport scipy.cluster.hierarchy as sch\ndendrogram = sch.dendrogram(sch.linkage(x, method='ward'))\nplt.title('Dendrogram', fontsize=20)\nplt.xlabel('customers')\nplt.ylabel('Euclidean Distance')\nplt.show()\n\n\n\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nhc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')\ny_hc = hc.fit_predict(x)\n\nplt.scatter(x[y_hc == 0, 0], x[y_hc == 0, 1], s=100, c='pink', label='miser')\nplt.scatter(x[y_hc == 1, 0], x[y_hc == 1, 1], s=100, c='yellow', label='general')\nplt.scatter(x[y_hc == 2, 0], x[y_hc == 2, 1], s=100, c='orange', label='target')\nplt.scatter(x[y_hc == 3, 0], x[y_hc == 3, 1], s=100, c='magenta', label='spendthrift')\nplt.scatter(x[y_hc == 4, 0], x[y_hc == 4, 1], s=100, c='cyan', label='careful')\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=100, c='blue', label='centroid')\n\nplt.style.use('fivethirtyeight')\nplt.title('Cluster analysis - hierarchical clustering', fontsize=20)\nplt.xlabel('Annual income')\nplt.ylabel('spending score (1-100)')\nplt.legend()\nplt.grid()\nplt.show()\n\n# age and spending score:\n\n\nx = data.iloc[:, [2, 4]].values\n\nwcss = []\nfor i in range(1, 11):\n    km = KMeans(n_clusters=i, init='k-means++', n_init=10, max_iter=300, random_state=0)\n    km.fit(x)\n    wcss.append(km.inertia_)\n\nplt.plot(range(1, 11), wcss)\nplt.title('The elbow method', fontsize=20)\nplt.xlabel('No of clusters')\nplt.ylabel('wcss')\nplt.show()\n\nkm = KMeans(n_clusters=4, init='k-means++', max_iter=300,\n            n_init=10, random_state=0)\ny_means = km.fit_predict(x)\n\nplt.scatter(x[y_means == 0, 0], x[y_means == 0, 1],\n            s=100, c='pink', label='target customer')\nplt.scatter(x[y_means == 1, 0], x[y_means == 1, 1], s=100, c='yellow',\n            label='priority')\nplt.scatter(x[y_means == 2, 0], x[y_means == 2, 1], s=100, c='cyan',\n            label='usual customer')\nplt.scatter(x[y_means == 3, 0], x[y_means == 3, 1], s=100, c='magenta',\n            label='target old customer')\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=50, c='blue', label='centroid')\n\n\nplt.style.use('fivethirtyeight')\nplt.title('K-means Clustering', fontsize=20)\nplt.xlabel('Age')\nplt.ylabel('Spending score')\nplt.legend()\nplt.grid()\nplt.show()\n\n# Looking at the chart of age vs spending score above, the usual customers are spread across all\n# ages, and there are target customers among both the young and the old. With these results,\n# different marketing strategies and policies can be designed to optimize the spending scores\n# of the customers in the mall.\n\nx = data[['Age', 'Spending Score (1-100)', 'Annual Income (k$)']].values\nkm = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)\nkm.fit(x)\nlabels = km.labels_\ncentroids = km.cluster_centers_\n\ndata['labels'] = labels\ntrace1 = go.Scatter3d(\n    x=data['Age'],\n    y=data['Spending Score (1-100)'],\n    z=data['Annual Income (k$)'],\n    mode='markers',\n    marker=dict(\n        color=data['labels'],\n        size=10,\n        line=dict(\n            color=data['labels'],\n            width=12\n        ),\n        opacity=0.8\n    )\n)\ndf = [trace1]\n\nlayout = go.Layout(\n    title='Age vs Spending Score vs Annual Income',\n    margin=dict(\n        l=0,\n        r=0,\n        b=0,\n        t=0\n    ),\n    scene=dict(\n        xaxis=dict(title='Age'),\n        yaxis=dict(title='Spending Score'),\n        zaxis=dict(title='Annual Income')\n    )\n)\nfig = go.Figure(data=df, layout=layout)\npy.offline.plot(fig)\n\n# This is a multivariate analysis of age vs annual income vs spending score.\n", "sub_path": "mallcustomersegmentation.py", "file_name": "mallcustomersegmentation.py", "file_ext": "py", "file_size_in_byte": 10827, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "plotly.offline.init_notebook_mode", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "plotly.figure_factory.create_table", "line_number": 31, "usage_type": "call"}, {"api_name": "plotly.figure_factory", "line_number": 31, "usage_type": "name"}, {"api_name": "plotly.offline.iplot", "line_number": 33, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "pandas.plotting.andrews_curves", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.plotting", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 53, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 56, "usage_type": "call"}, {"api_name": "seaborn.distplot", 
"line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 63, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 81, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 91, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 100, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "seaborn.countplot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 107, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": 
"seaborn.countplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "seaborn.pairplot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 121, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 130, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "seaborn.boxenplot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 138, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 149, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 
174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 195, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.dendrogram", "line_number": 212, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy", "line_number": 212, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.linkage", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "sklearn.cluster.AgglomerativeClustering", "line_number": 223, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.scatter", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 233, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 266, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 273, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 284, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter3d", "line_number": 290, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 290, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 307, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 307, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 321, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 321, "usage_type": "name"}, {"api_name": "plotly.offline.offline.plot", "line_number": 322, "usage_type": "call"}, {"api_name": "plotly.offline.offline", "line_number": 322, "usage_type": "attribute"}, {"api_name": "plotly.offline", "line_number": 322, "usage_type": "name"}]}
+{"seq_id": "528547404", "text": "# -*- coding: utf-8 -*- (Python 3.5)\n\"\"\"\nDescription: Reads Adcirc Global Output file fort.63 & returns time series at\nselected nodes.\nInput(s): fort.63, Nodes of interest\nOutput(s): Time series .txt files\njdorvinen@dewberry.com, slawler@dewberry.com\nCreated on Tue Apr 19 15:08:33 2016\n\"\"\"\n#---------------------------------------Load Python Modules---------------------------------------#\n#import fileinput\nfrom datetime import datetime as dt\nfrom copy import deepcopy\nimport os\nfrom NODES_LIST import NODES_LIST\nfrom TRANSECTS import TRANSECTS\nimport numpy as np\n\n#------------------------------------------User Inputs--------------------------------------------#\nPARENT_DIR = \"P:/02/LakeOntario/Storm/\"\nINPUTFILES = [\"fort.63\", \"swan_TP.63\", \"swan_HS.63\"]\nSTORM_LIST = [\"19740314\", \"19770107\", \"19800109\", \"20061026\", \"19710301\"]\nPARAMETERS = {\"fort.63\":\"SWEL\", \"swan_TP.63\":\"TPS\", \"swan_HS.63\":\"HS\"}\n\n#------------------------------------------BEGIN SCRIPT-------------------------------------------#\n\ndef extract(root):\n\n    \"\"\"Extracts data from ADCIRC time series files\"\"\"\n    nodes_list = deepcopy(NODES_LIST)\n    for infile in INPUTFILES:\n        print(\"Extracting \"+root+\"/\"+infile)\n        f63 = os.path.join(root, infile)  #-- 63 files\n        with open(f63) as fin:\n            for line in fin:\n                mynode = line.strip().split(' ')[0]  #-- Test each line\n                if mynode in nodes_list.keys():\n                    value = line.strip().split()[1]\n                    nodes_list[mynode][PARAMETERS[infile]].append(value)\n    return nodes_list\n\ndef write_data(root, nodes_list):\n    \"\"\"Write extracted data to files\"\"\"\n    for transect in TRANSECTS:\n        for node in TRANSECTS[transect]:\n            filename = \"transect_{0}_node_{1}.txt\".format(transect,\n                                                          node)\n            length = max([len(nodes_list[node]['SWEL']),\n                          len(nodes_list[node]['HS']),\n                          len(nodes_list[node]['TPS'])])\n            timesteps = np.arange(0, length)\n            with open(os.path.join(root, filename), 'w') as savefile:\n                for step in timesteps:\n                    time = '{:>12}'.format(str((step)*1800))\n                    if step == 0:\n                        swel = '{:>24}'.format('nan')\n                    else:\n                        try:\n                            swel = '{:>24}'.format(nodes_list[node]['SWEL'][step-1])\n                        except LookupError:\n                            swel = '{:>24}'.format('nan')\n                    try:\n                        hsig = '{:>24}'.format(nodes_list[node]['HS'][step])\n                    except LookupError:\n                        hsig = '{:>24}'.format('nan')\n                    try:\n                        tps = '{:>24}'.format(nodes_list[node]['TPS'][step])\n                    except LookupError:\n                        tps = '{:>24}'.format('nan')\n                    line = time+swel+hsig+tps+\"\\n\"\n                    savefile.write(line)\n\n#------------------------------------------MAIN FUNCTION------------------------------------------#\ndef main():\n\n    \"\"\"Main function, runs the extract() function and times it.\"\"\"\n\n    start_time = dt.now()\n    print(\"\\n==========START========= \\n\")\n    print('Begin extracting data:\\n')\n    print(start_time)\n\n    for storm in STORM_LIST:\n        root = os.path.join(PARENT_DIR, storm)\n        nodes_list = extract(root)\n        write_data(root, nodes_list)\n\n    end_time = dt.now()\n    tda = str(end_time-start_time).split('.')[0].split(':')\n    print(\"\\n===========END==========\\n\")\n    print(\"Processing Time :\\n\")\n    print(\"{0} hrs, {1} mins, {2} sec \\n\\n\".format(tda[0], tda[1], tda[2]))\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "lake_ontario/extract_from_63_list_09022016_offset_3in1.py", "file_name": "extract_from_63_list_09022016_offset_3in1.py", "file_ext": "py", "file_size_in_byte": 3796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": 
[{"api_name": "copy.deepcopy", "line_number": 30, "usage_type": "call"}, {"api_name": "NODES_LIST.NODES_LIST", "line_number": 30, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "TRANSECTS.TRANSECTS", "line_number": 44, "usage_type": "name"}, {"api_name": "TRANSECTS.TRANSECTS", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "name"}]}
+{"seq_id": "471922606", "text": "\"\"\"EK80 data client\n\nAuthor: Terje Nilsen, Kongsberg Maritime AS\n\"\"\"\n\nimport socket\nimport time\nimport datetime\nimport binascii\nimport threading\nimport sys\nimport requests\n\nimport xmltodict\nimport xml.etree.ElementTree as ET\nfrom struct import *\nfrom collections import namedtuple\nfrom pprint import pprint\n\n# comment out the line below to disable debug-level messages\nlogging.basicConfig(level=logging.DEBUG)\n\n# PS: Enabling debug output might in som cases delay the handling and cause errors.\n# If you start to get lost messages, disable debug and retest.\n# If this helps, then remove some output messages in the EK80_data function.\n# The EK80_data function is time critical...\n\ndef bytes_to_int(bs):\n \"\"\"Convert a byte string to int(16)\n \"\"\"\n return int(bs[0]) + int(bs[1]*256)\n\nclass t9ek80:\n \"\"\"\n \"\"\"\n#----------------------------------------------------------------------------\n# Method report\n# Description User defined REPORT function, this is to be adapter to individual needs.\n# It receives a list for parameters and meta data to process...\n#-----------------------------------------------------------------------------\n# For motion simulation only, to be removed...\n def __init__(self, argv):\n\n self.error = 0 # Class Error handler...\n\n # Data that will be read from the xml file...\n # PS: These walues will be overwritten...\n self.UDP_IP = \"127.0.0.1\"\n self.UDP_PORT = 37655\n self.UDP_DATA = 0\n self.desimate = 0\n\n self.NMEA_DATA = 0 # Will be set by the XML handler...\n\n # KDI_TCP_IP = \"127.0.0.1\"\n # KDI_TCP_PORT = 55035\n # USEKOGNIFAI = 0 #True\n\n self.Status_Command = 1\n self.Status_Data = 2\n self.Status_NMEA = 4\n self.Status_Done = 8\n self.Status_Running = 16\n\n # globale variable\n self.client_seq_no = 1\n self.mtypeName = \"\"\n self.itypeVal = \"\"\n self.itypeSize = 0\n self.EK_req = \"\"\n self.EK_Value = 0\n self.EK_Type = \"\"\n self.desimated = 0\n self.finale_data = b\"\"\n self.mtype = \"\"\n self.running = 0 # 0x1FF when all prosesses running...\n self.totalbytes = 0\n\n self.config = \"config.xml\"\n self.busy = 0\n self.mode = -1\n self.cont = False\n\n self.debug = self.getDebug()\n\n #FIXME: the code below should be moved to a load_config() method\n\n # Get extra parameters...\n if len(argv) == 3:\n self.mode = int(argv[2])\n\n # count the arguments\n if len(argv) < 2:\n print(\"Usage: python3 tescast.py config.xml [transponder]\")\n self.error = -1\n else:\n print(\"Initializes config file: \"+argv[1])\n arguments = len(argv)\n if arguments >= 2:\n config = argv[1]\n\n # Open the default channel...\n tree = ET.parse(config)\n root = tree.getroot()\n\n for table in root.iter('Configuration'):\n for child in table:\n if child.tag == 'EK80':\n for child2 in child:\n if child2.tag == 'EK80_IP':\n self.UDP_IP = child2.text\n if child2.tag == 'EK80_PORT':\n self.UDP_PORT = int(child2.text)\n if child2.tag == 'EK80_DATA':\n self.UDP_DATA = int(child2.text)\n if child2.tag == 'NMEA_DATA':\n self.NMEA_DATA = int(child2.text)\n if child2.tag == 'DESIMATE':\n self.desimate = int(child2.text)\n\n # if child.tag == 'Cloud':\n # for child2 in child:\n # if child2.tag == 'KDI_TCP_IP':\n # self.KDI_TCP_IP = child2.text\n # if child2.tag == 'KDI_TCP_PORT':\n # self.KDI_TCP_PORT = int(child2.text)\n # if child2.tag == 'USEKOGNIFAI':\n # self.USEKOGNIFAI = int(child2.text)\n\n if child.tag == 'Request':\n for child2 in child:\n if child2.tag == 'req':\n self.EK_req = child2.text\n if 
child2.tag == 'req2':\n self.EK_Value = child2.text\n if child2.tag == 'req3':\n self.EK_Type = child2.text\n if child2.tag == 'res':\n self.mtypeName = child2.text\n if child2.tag == 'resi':\n self.itypeVal = child2.text\n if child2.tag == 'ress':\n self.itypeSize = int(child2.text)\n if child2.tag == 'type':\n self.mtype = child2.text\n #----------------------------------------------------------------------------\n # Can be overridden in a local file...\n def getDebug(self):\n \"\"\"\n \"\"\"\n return False\n\n # Do the reporting stuff...\n def report(self, Payload, Decode, timenow, mtype, decimate):\n \"\"\"Incoming data handler (to be overridden by user in derived class)\n \"\"\"\n logging.warning(\"Missing interface module...\")\n\n\n def NMEAdecode(self, data):\n \"\"\"NMEA data handler (to be overridden by user in derived class)\n \"\"\"\n logging.warning(\"Missing NMEA interface module...\")\n\n\n # ----------------------------------------------------------------------------\n # Method Prepare subscription...\n # Description Adds the subscription to the EK80 subscription handler.\n # Can create or change a subscription...\n #-----------------------------------------------------------------------------\n def subscribe(self, sock, ApplicationID, transponder, create):\n \"\"\"Create or change a data subscription\n \"\"\"\n self.EK_req = self.EK_req.replace(\"?\", transponder)\n logging.debug(self.EK_req)\n\n if create == True:\n self.CreateSubscription(sock, ApplicationID, self.UDP_DATA, self.EK_req)\n else:\n self.ChangeSubscription(sock, ApplicationID, self.UDP_DATA, self.EK_req)\n\n def GetParameterValue(self, sock, ApplicationID, transponder, parameter_name):\n \"\"\"\n Retrieve a parameter value\n \"\"\"\n parameter_name = parameter_name.replace(\"?\", transponder)\n\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n \"ParameterServer\" \\\n \"\" \\\n \"\" \\\n \"{:s}\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no, parameter_name)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request, encoding='utf-8')\n sock.send(request)\n self.client_seq_no = self.client_seq_no + 1\n\n def SetParameter(self, sock, ApplicationID, transponder, parameter_name, parameter_value, parameter_type):\n \"\"\"\n Set a parameter value\n \"\"\"\n parameter_name = parameter_name.replace(\"?\", transponder)\n\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n \"ParameterServer\" \\\n \"\" \\\n \"\" \\\n \"{:s}\" \\\n \"{:s}\" \\\n \"{:s}\" \\\n \"\" \\\n \"\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no, parameter_name, parameter_value, parameter_type)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request, encoding='utf-8')\n sock.send(request)\n self.client_seq_no = self.client_seq_no + 1\n\n def CreateSubscription(self, sock, ApplicationID, port, parameter_name):\n \"\"\"\n Create a data subscription\n \"\"\"\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n 
\"RemoteDataServer\" \\\n \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:s}\" \\\n \"\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no, port, parameter_name)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request, encoding='utf-8')\n sock.send(request)\n self.client_seq_no = self.client_seq_no + 1\n\n #----------------------------------------------------------------------------\n # Method ChangeSubscription\n # Description Changes an existing subscription to EK80...\n #-----------------------------------------------------------------------------\n def ChangeSubscription(self, sock, ApplicationID, port, parameter_name):\n tmp = \"REQ\\0{:d},1,1\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\".format(self.client_seq_no)\n tmp = tmp[0:26]\n tmp2 = \"\" \\\n \"\" \\\n \"{:d}\" \\\n \"{:d}\" \\\n \"\" \\\n \"invokeMethod\" \\\n \"RemoteDataServer\" \\\n \"\" \\\n \">\" \\\n \"{:d}\" \\\n \"{:s}\" \\\n \">\" \\\n \"\" \\\n \"\\0\".format(ApplicationID, self.client_seq_no,ApplicationID,parameter_name)\n request = tmp + tmp2\n\n # Send the request and increase the sequence number\n request = bytes(request,encoding='utf-8')\n sock.send(request)\n self.client_seq_no=self.client_seq_no + 1\n\n #----------------------------------------------------------------------------\n # Method EK80_comunicate\n # Description Initiate a communication and data channel to the EK80...\n #-----------------------------------------------------------------------------\n def EK80_comunicate(self, port, data):\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((self.UDP_IP, port))\n sock.settimeout(5.0)\n self.running = self.running |self. Status_Command\n\n while self.running & self.Status_Running:\n\n if len(data) >= 3:\n if data[:3] == b'SI2':\n msg = bytearray(b'CON\\0Name:Simrad;Password:\\0')\n sock.send(msg) # Send connect...\n\n elif data[:3] == b'RES':\n if data[4:7] == b'CON':\n if data[30:45] == b'ResultCode:S_OK':\n logging.debug(\"Connected\")\n\n data2 = data[46:].replace(b'AccessLevel:',b' ')\n data2 = data2.replace(b'ClientID:',b' ')\n data2 = data2.replace(b'}',b' ')\n data2 = data2.replace(b',',b' ')\n data3 = data2.split()\n ApplicationID = int(data3[1].decode())\n logging.debug(\"Get Param\")\n self.GetParameterValue(sock,ApplicationID, \"\", \"TransceiverMgr/Channels\" )\n\n else: # If failed the retry...\n logging.warning(\"Connection failed!\")\n msg = bytearray(b'CON\\0Name:Simrad;Password:\\0')\n sock.send(msg) # Send connect...\n\n elif data[4:7] == b'REQ':\n logging.debug('RES REQ received...')\n msg = data[30:].decode(\"utf-8\").rstrip(\"\\0\")\n root = ET.fromstring(msg)\n\n element = \"\"\n for table in root.iter('GetParameterResponse'):\n for child in table:\n for child2 in child:\n if child2.tag == 'value':\n element = child2.text.split(',')\n\n if len(element) > 0:\n if self.mode == -1: # If we already got a mode from command line parameter...\n print('\\n\\rTransponder to use:')\n i = 0\n for e in element:\n print('{:d}: '.format(i) + e)\n i = i+1\n\n # If there are only one head, then select it, no question...\n if len(element) == 1: # If there is only one option...\n self.mode = 0\n else: # Else let the user select...\n self.mode = -1\n while self.mode < 0 or self.mode > len(element):\n try:\n self.mode=int(input('Select Transponder: '))\n except ValueError:\n print (\"Not a number\")\n else:\n print('{:d}: '.format(self.mode) + element[self.mode])\n\n transponder = element[self.mode]\n 
#print(self.mtype)\n\n if self.mtype == \"Set_Param\":\n self.SetParameter(sock, ApplicationID, transponder, self.EK_req, self.EK_Value, self.EK_Type)\n self.running = self.running | self.Status_Done\n break\n else:\n self.subscribe(sock, ApplicationID, transponder, True)\n else:\n logging.debug(\"Received status\")\n if self.mtype == \"Set_Param\":\n self.cont = True\n self.running = self.running | self.Status_Command\n else:\n logging.warning(\"Received unknown response\")\n\n elif data[:3] == b'ALI':\n msg = 'ALI\\0ClientID:{:d},SeqNo:{:d}\\0'.format(ApplicationID, self.client_seq_no)\n msg = bytes(msg, encoding='utf-8')\n sock.send(msg) # Send keep-alive...\n # logging.debug('.')\n elif data[:3] == b'RTR':\n logging.debug(\"RTR received: %s\", data)\n elif data[:3] == b'REQ':\n logging.debug(\"REQ received\")\n elif data[:3] == b'PRD':\n logging.debug(\"PRD received: {}\".format(data))\n else:\n logging.debug(\"Wrong data\")\n else:\n logging.error(\"EK80 error...\")\n\n\n try:\n data = sock.recv(20000)\n except socket.timeout:\n continue\n\n logging.debug(\"Closing command handler\")\n\n self.running = self.running & ~self.Status_Command\n msg = bytearray(b'DIS\\0Name:Simrad;Password:\\0')\n sock.send(msg) # Send disconnect...\n\n sock.settimeout(None)\n sock.close()\n\n #----------------------------------------------------------------------------\n # Method EK80_data\n # Description The subscription data handler...\n # Data is parsed according to the XML file...\n #-----------------------------------------------------------------------------\n\n def EK80_data(self, a, b):\n time = 0\n\n # Open the default channel...\n logging.debug(\"Setting up data channel\")\n\n datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n datasock.bind((\"0.0.0.0\", self.UDP_DATA))\n self.UDP_DATA = datasock.getsockname()[1]\n datasock.settimeout(5.0)\n logging.info('EK80data listening on port: %d', self.UDP_DATA)\n self.running = self.running | self.Status_Data\n\n # Data can in some cases be received in frame sets; we then need to make sure that we start with the first frame in the set.\n # Sometimes we are a bit slow, hence the busy structure...\n while self.running & self.Status_Running:\n try:\n data = datasock.recv(50000)\n except socket.timeout:\n continue\n\n Decode = unpack('<4siiHHH', data[0:18])\n\n if self.busy == 110 and Decode[4] == Decode[3]:\n self.busy = 2 # Ready for next...\n else:\n self.finale_data = self.finale_data+data[18:]\n self.totalbytes = self.totalbytes + Decode[5]\n\n if Decode[4] == Decode[3]:\n self.busy = 1 #Busy...\n\n if self.debug == True:\n print(\"\\n\\rHeader: {:s}\".format(Decode[0].decode('utf-8')))\n print(\"SeqNo: {:d}\".format(Decode[1]))\n print(\"SubID: {:d}\".format(Decode[2]))\n print(\"CurrentMsg: {:d}\".format(Decode[3]))\n print(\"TotalMsg: {:d}\".format(Decode[4]))\n print(\"NoOfBytes: {:d}\".format(Decode[5]))\n\n if self.itypeSize > 0:\n tmp = unpack(\" 0:\n for loop in range(0,tmp[1]):\n start = loop*self.itypeSize\n end = (loop*self.itypeSize)+self.itypeSize\n dta = self.finale_data[start:end]\n Payload.append(unpack(\"<\"+self.itypeVal,self.finale_data[start:end]))\n\n if self.debug == 2:\n for element in Payload:\n for elements in element:\n print(\"Value: {:f}\".format(elements))\n\n else:\n Payload = unpack(\" 0:\n datasock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n datasock.bind((\"0.0.0.0\", int(self.NMEA_DATA)))\n self.NMEA_DATA = datasock.getsockname()[1]\n datasock.settimeout(5.0)\n print('NMEA listening on port:', 
self.NMEA_DATA)\n data = b\"\"\n self.running = self.running | self.Status_NMEA\n\n while self.running & self.Status_Running:\n try:\n data = datasock.recv(20000)\n except socket.timeout:\n continue\n\n self.NMEAdecode(data)\n # maybe add a sleep here?\n\n logging.debug(\"NMEA closed\")\n\n datasock.settimeout(None)\n datasock.close()\n\n else:\n logging.debug(\"NMEA not used\")\n\n self.running = self.running & ~self.Status_NMEA\n\n #----------------------------------------------------------------------------\n # Method main function, entry point\n # Description Parse the XML and get started...\n #-----------------------------------------------------------------------------\n def main(self):\n \"\"\"\n \"\"\"\n # Request a port number from the EK80 to use for future communications.\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((self.UDP_IP, self.UDP_PORT))\n sock.settimeout(5.0)\n sock.send('RSI\\0'.encode()) # Send reset...\n try:\n data = sock.recv(8000)\n except socket.timeout:\n print(\"No equipment found, make sure the IP:port is set to: {:s}:{:d}\".format(self.UDP_IP, self.UDP_PORT))\n sock.close()\n return\n\n # Print status so far...\n print('Unit: ', data[4:8])\n print('ID: ', data[272:283])\n port = bytes_to_int(data[264:266])\n\n # Close and reopen a new channel...\n sock.settimeout(None)\n sock.close()\n\n #----------------------------------------------------------------------------\n # Start communication...\n if len(data) > 3:\n\n self.running = self.Status_Running # Start running...\n\n logging.debug(\"Starting NMEA thread\")\n thread3 = threading.Thread(target=self.NMEA_data, args=(0, 0))\n thread3.start()\n\n logging.debug(\"Start Data thread\")\n thread2 = threading.Thread(target=self.EK80_data, args=(0, 0))\n thread2.start()\n\n logging.debug(\"Awaiting Data handler ready...\")\n\n while (self.running & self.Status_Data) == 0:\n time.sleep(1)\n if not thread2.is_alive():\n break\n\n # If the data thread is running (should always be...)\n if self.running & self.Status_Data:\n logging.debug(\"Starting command thread\")\n\n thread1 = threading.Thread(target=self.EK80_comunicate, args=(port, data))\n thread1.start()\n\n while (self.running & self.Status_Command) == 0 and (self.running & self.Status_Done) == 0:\n time.sleep(1)\n if not thread1.is_alive():\n break\n\n # Handle data until Enter is pressed...\n if self.running & self.Status_Command and self.cont == False:\n input('Enter to exit...')\n\n # Exit gracefully...\n logging.debug(\"Stopping\")\n\n time.sleep(4)\n self.running = self.running & ~self.Status_Running\n while self.running & ~self.Status_Done:\n time.sleep(1)\n\n time.sleep(2)\n", "sub_path": "t9ek80/t9ek80/t9ek80.py", "file_name": "t9ek80.py", "file_ext": "py", "file_size_in_byte": 24765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 103, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 103, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 302, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 302, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 302, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 336, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 336, "usage_type": "name"}, {"api_name": "socket.timeout", "line_number": 402, 
"usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 426, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 426, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 426, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 438, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 463, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 463, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 481, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 481, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 512, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 512, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 512, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 523, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 547, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 547, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 547, "usage_type": "attribute"}, {"api_name": "socket.timeout", "line_number": 553, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 574, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 578, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 584, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 592, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 596, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 607, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 610, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 612, "usage_type": "call"}]}
+{"seq_id": "523771320", "text": "import numpy as np\nimport os, csv\nimport cPickle as pickle\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nimport datetime as dt\n\nauthors = {}\n\nbasedir = '/home/cwp/EMC/lib/analysis/zhr/'\n\n# Get directories\ncwd = '/home/cwp/EMC/data/authors/'\nfilenames = os.listdir(cwd)\n\nfor filename in filenames:\n with open(cwd+filename, 'rb') as input:\n authors[filename[:-4]] = pickle.load(input)\n\ndef getObservers(filepath):\n base = filepath\n toReturn = {}\n\n for filename in os.listdir(filepath):\n toReturn[filename[:-4]] = []\n\n with open(base + filename, 'r') as f:\n for line in f.readlines():\n toReturn[filename[:-4]].append(line.split('\\n')[0])\n\n os.remove(base+filename)\n\n return toReturn\n\ndef getShowerInfo(filepath):\n data = {}\n with open(basedir+filepath, 'r') as f:\n readFile = list(csv.reader(f))\n for line in readFile:\n data[int(line[0])] = {'ra':float(line[1]),\\\n 'dec':float(line[2]), 'peak':line[3], 'start':line[4], \\\n 'end':line[5], 'r':float(line[6]), 'zhr_exp':int(line[7]),\\\n 'zhr_max':int(line[8])}\n\n return data\n\ndef getDateRange(start,end,startYear,endYear):\n startDate = datetime(startYear, int(start.split('/')[1]), \\\n int(start.split('/')[0]))\n\n endDate = datetime(endYear, int(end.split('/')[1]), \\\n int(end.split('/')[0]))\n\n dates = []\n\n while startDate <= endDate:\n dates.append(startDate)\n startDate += dt.timedelta(days=1)\n\n return dates\n\nshowers = ['perseids', 'leonids', 'quadrantids', 'geminids', 'orionids', 'eta_aquariids']\ndates = ['2005','2006','2007','2010','2011','2012','2013','2014','2015','2016']\n\nshowerObservers = {}\nshowerObserversFinal = {}\n\nfor shower in showers:\n showerObservers[shower] = getObservers(basedir+'dates/'+shower+'/')\n\nfor shower, observers in showerObservers.items():\n print('========'+shower+'========')\n\n showerData = getShowerInfo(shower+'radiant.txt')\n\n count = 0\n\n for observer in observers:\n #print(observer)\n okayDates = []\n\n for date in observers[observer]:\n noNaN = True\n noPeakNaN = True\n year = int(date[:4])\n\n if year in showerData.keys():\n peakDate = datetime(year,int(\\\n showerData[year]['peak'].split('/')[1]), int(\\\n showerData[year]['peak'].split('/')[0]))\n\n if shower != 'quadrantids':\n activeRange = getDateRange(showerData[year]['start'],\\\n showerData[year]['end'], year, year)\n else:\n activeRange = getDateRange(showerData[year]['start'],\\\n showerData[year]['end'], year, year+1)\n\n entry = authors[observer].data[date]\n entry.loadData()\n\n for day, dayData in entry.data.items():\n try:\n currentDate = datetime(int(date.split('-')[0]), \\\n int(date.split('-')[1]), int(day))\n if currentDate in activeRange:\n dayData = dayData[:-1]\n\n for hour in dayData:\n if hour == '-1':\n noNaN += 1\n if (currentDate == peakDate) and (hour == '-1'):\n noPeakNaN = False\n\n except:\n pass\n\n if (noNaN < 12) and noPeakNaN:\n okayDates.append(date)\n\n if len(okayDates) != 0:\n finalDates = []\n if shower == 'quadrantids':\n for aDate in okayDates:\n if aDate[-2:] == '01':\n if str(int(aDate[:-3])-1)+'-12' in okayDates:\n finalDates.append(str(int(aDate[:-3])-1)+'-12')\n finalDates.append(aDate)\n\n if shower == 'geminids':\n for aDate in okayDates:\n finalDates.append(aDate)\n\n if shower == 'leonids':\n for aDate in okayDates:\n finalDates.append(aDate)\n\n if shower == 'orionids':\n for aDate in okayDates:\n if aDate[-2:] == '10':\n if aDate[:-2]+'11' in okayDates:\n finalDates.append(aDate)\n 
finalDates.append(aDate[:-2]+'11')\n\n if shower == 'perseids':\n for aDate in okayDates:\n if aDate[-2:] == '07':\n if aDate[:-2]+'08' in okayDates:\n finalDates.append(aDate)\n finalDates.append(aDate[:-2]+'08')\n\n if shower == 'eta_aquariids':\n for aDate in okayDates:\n if aDate[-2:] == '04':\n if aDate[:-2]+'05' in okayDates:\n finalDates.append(aDate)\n finalDates.append(aDate[:-2]+'05')\n\n if len(finalDates) != 0:\n count += 1\n\n with open(basedir+'dates/'+shower+'/'+observer+'.txt', 'w') as f:\n for date in finalDates:\n f.write(date)\n f.write('\\n')\n print(count)\n", "sub_path": "zhr/refineAuthors.py", "file_name": "refineAuthors.py", "file_ext": "py", "file_size_in_byte": 5491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 31, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 104, "usage_type": "call"}]}
+{"seq_id": "384900255", "text": "# coding: utf8\nfrom coll_avoidance_modules.solo_coll_wrapper_c import *\nfrom coll_avoidance_modules.collisions_controller import *\nfrom coll_avoidance_modules.collisionsViewerClient import *\n\nfrom utils.logger import Logger\nfrom pynput import keyboard\n\nimport numpy as np\nimport argparse\nimport math\nfrom time import clock, sleep\nfrom solo12 import Solo12\n\n\ndef compute_pd(q_desired, v_desired, KP, KD, device):\n\tpos_error = q_desired - device.q_mes\n\tvel_error = v_desired - device.v_mes\n\ttau = KP * pos_error + KD * vel_error #+ KT * tau_desired\n\t#tau = np.maximum(np.minimum(tau, tau_max), -tau_max) \n\treturn tau\n\n\ndef on_press(key):\n\tglobal key_pressed\n\ttry:\n\t\tif key == keyboard.Key.enter:\n\t\t\tkey_pressed = True\n\t\t\t# Stop listener\n\t\t\treturn False\n\texcept AttributeError:\n\t\tprint('Unknown key {0} pressed'.format(key))\n\n\ndef put_on_the_floor(device, q_init):\n\tglobal key_pressed\n\tkey_pressed = False\n\tKp_pos = 3.\n\tKd_pos = 0.01\n\timax = 3.0\n\tpos = np.zeros(device.nb_motors)\n\tfor motor in range(device.nb_motors):\n\t\tpos[motor] = q_init[device.motorToUrdf[motor]] * device.gearRatioSigned[motor]\n\tlistener = keyboard.Listener(on_press=on_press)\n\tlistener.start()\n\tprint(\"Put the robot on the floor and press Enter\")\n\twhile not key_pressed:\n\t\tdevice.UpdateMeasurment()\n\t\tfor motor in range(device.nb_motors):\n\t\t\tref = Kp_pos*(pos[motor] - device.hardware.GetMotor(motor).GetPosition() - Kd_pos*device.hardware.GetMotor(motor).GetVelocity())\n\t\t\tref = min(imax, max(-imax, ref))\n\t\t\tdevice.hardware.GetMotor(motor).SetCurrentReference(ref)\n\t\tdevice.SendCommand(WaitEndOfCycle=True)\n\n\tprint(\"Start the motion.\")\n\n\ndef example_script(name_interface, legs_clib_path, shd_clib_path):\n device = Solo12(name_interface,dt=0.001)\n nb_motors = device.nb_motors\n LOGGING = False\n VIEWER = False\n \n qc = None\n if LOGGING:\n # Initialize logger\n qc = QualisysClient(ip=\"140.93.16.160\", body_id=0) # ??\n logger = Logger(device, qualisys=qc, logSize=50000)\n \n #### Set ref. traj. PD parameters\n ref_traj_KP = 0\n ref_traj_KV = 0\n active_dof = [0,1,2,3,4,5,6,7,8,9,10,11]\n\n #### Set collision avoidance parameters\n legs_threshold = 0.05\n legs_kp = 20.\n legs_kv = 0.0\n nb_legs_pairs = 20\n\n #### Shoulder collision parameters\n shd_threshold = 0.2\n shd_kp = 3.\n shd_kv = 0.\n\n #### Reference traj. 
parameters\n # NOTE: assumed to be trajectories recorded with np.save; the '###.npy' names are placeholders\n q_ref_list = np.load('###.npy')\n dq_ref_list = np.load('###.npy')\n\n traj_KP = 1*np.ones(12)\n traj_KP[:] = 0.\n traj_KV = 0*np.ones(12)\n\n q_init = q_ref_list[0][7:]\n traj_counter = 0\n\n ### Emergency behavior switches\n q_bounds = [-4,4]\n vq_max = 20.0\n tau_q_max = 1.0\n\n # Load the specified compiled C library\n cCollFun = CDLL(legs_clib_path)\n nnCCollFun = CDLL(shd_clib_path)\n # Initialize emergency behavior trigger var.\n emergencyFlag = 0\n\n # Initialize viewer\n if VIEWER:\n viewer_coll = viewerClient(nb_legs_pairs, 3, legs_threshold, shd_threshold, urdf=\"/home/ada/git/tnoel/solopython/coll_avoidance_modules/urdf/solo12_simplified.urdf\", modelPath=\"/home/ada/git/tnoel/solopython/coll_avoidance_modules/urdf\")\n\n device.Init(calibrateEncoders=True, q_init=q_init)\n\n put_on_the_floor(device, q_init)\n #CONTROL LOOP ***************************************************\n tau_q = np.zeros(nb_motors)\n tau_PD = np.zeros(nb_motors)\n while ((not device.hardware.IsTimeout()) and (clock() < 120) and emergencyFlag == 0):\n device.UpdateMeasurment()\n\n tau_q[:] = 0.\n tau_PD[:] = 0.\n\n # Compute PD to follow reference traj.\n curr_q_ref = q_ref_list[traj_counter][7:]\n curr_dq_ref = dq_ref_list[traj_counter][6:]\n tau_PD = compute_pd(curr_q_ref, curr_dq_ref, traj_KP, traj_KV, device)\n\n traj_counter += 1\n\n # Compute collision distances and Jacobians from the C lib.\n c_results = getLegsCollisionsResults(device.q_mes, cCollFun, nb_motors, nb_legs_pairs, witnessPoints=True)\n c_dist_legs = getLegsDistances(c_results, nb_motors, nb_legs_pairs, witnessPoints=True)\n c_Jlegs = getLegsJacobians(c_results, nb_motors, nb_legs_pairs, witnessPoints=True)\n c_wPoints = getLegsWitnessPoints(c_results, nb_motors, nb_legs_pairs)\n \n ### Get results from C generated code (shoulder neural net)\n #c_shd_dist, c_shd_jac = getAllShouldersCollisionsResults(device.q_mes, nnCCollFun, 2, offset=0.08) # 2D neural net\n c_shd_dist, c_shd_jac = getAllShouldersCollisionsResults(device.q_mes, nnCCollFun, 3, offset=0.11) #offset with 3 inputs: 0.18 (small), 0.11 (large)\n\n # Compute collision avoidance torque\n tau_legs = computeRepulsiveTorque(device.q_mes, device.v_mes, c_dist_legs, c_Jlegs, legs_threshold, legs_kp, legs_kv, opposeJacIfNegDist=True)\n tau_shd = computeRepulsiveTorque(device.q_mes, device.v_mes, c_shd_dist, c_shd_jac, shd_threshold, shd_kp, shd_kv, opposeJacIfNegDist=False)\n\n tau_q = 1*tau_legs + 1*tau_shd\n\n # Set the computed torque as command\n tau_command = tau_q + tau_PD\n\n device.SetDesiredJointTorque(0*tau_command)\n # Check the condition for triggering emergency behavior\n emergencyFlag = max(emergencyFlag, emergencyCondition(device.q_mes, device.v_mes, tau_command, q_bounds, vq_max, tau_q_max))\n # Call logger\n if LOGGING:\n logger.sample(device, qualisys=qc)\n\n if VIEWER:\n viewer_coll.display(np.concatenate(([0,0,0,0,0,0,0],device.q_mes)), c_dist_legs, c_shd_dist, c_wPoints, tau_legs, tau_shd)\n\n device.SendCommand(WaitEndOfCycle=True)\n if ((device.cpt % 100) == 0):\n device.Print()\n print('Avoid. 
torque')\n print(tau_q)\n print('PD torque')\n print(tau_PD)\n\n\n #****************************************************************\n\n print(\"Emergency : {}\".format(emergencyFlag))\n\n # Whatever happened we send 0 torques to the motors.\n device.SetDesiredJointTorque([0]*nb_motors)\n device.SendCommand(WaitEndOfCycle=True)\n \n # Save the logs of the Logger object\n if LOGGING:\n logger.saveAll()\n print(\"Log saved\")\n \n if device.hardware.IsTimeout():\n print(\"Masterboard timeout detected.\")\n print(\"Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.\")\n device.hardware.Stop() # Shut down the interface between the computer and the master board\n \ndef main():\n parser = argparse.ArgumentParser(description='Example masterboard use in python.')\n parser.add_argument('-i',\n '--interface',\n required=True,\n help='Name of the interface (use ifconfig in a terminal), for instance \"enp1s0\"')\n\n parser.add_argument('-CL',\n '--cliblegs',\n required=True,\n help='Path to the compiled C-generated library used for distance and jacobian evaluations, for instance \"libcoll_legs8.so\"')\n\n parser.add_argument('-CS',\n '--clibshd',\n required=True,\n help='Path to the compiled C-generated library used for shoulder distance and jacobian evaluations, for instance \"libcoll_nn.so\"')\n\n example_script(parser.parse_args().interface, parser.parse_args().cliblegs, parser.parse_args().clibshd)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "main_solo12_collision_avoidance.py", "file_name": "main_solo12_collision_avoidance.py", "file_ext": "py", "file_size_in_byte": 7389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pynput.keyboard.Key", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "pynput.keyboard.Listener", "line_number": 44, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 44, "usage_type": "name"}, {"api_name": "solo12.Solo12", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.logger.Logger", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 158, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}]}
+{"seq_id": "320765239", "text": "import json\nimport pytest\nimport uuid\nfrom httpretty import httpretty\n\nfrom rasa_core import utils\nfrom rasa_core.training import online\nfrom rasa_core.utils import EndpointConfig\n\n\n@pytest.fixture\ndef mock_endpoint():\n return EndpointConfig(\"https://abc.defg\")\n\n\ndef test_send_message(mock_endpoint):\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/messages'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.POST, url, body='{}')\n\n httpretty.enable()\n online.send_message(mock_endpoint, sender_id, \"Hello\")\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n assert json.loads(b) == {\n \"sender\": \"user\",\n \"text\": \"Hello\",\n \"parse_data\": None\n }\n\n\ndef test_request_prediction(mock_endpoint):\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/predict'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.POST, url, body='{}')\n\n httpretty.enable()\n online.request_prediction(mock_endpoint, sender_id)\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n assert b == \"\"\n\n\ndef test_bot_output_format():\n message = {\n \"text\": \"Hello!\",\n \"data\": {\n \"image\": \"http://example.com/myimage.png\",\n \"attachment\": \"My Attachment\",\n \"buttons\": [\n {\"title\": \"yes\", \"payload\": \"/yes\"},\n {\"title\": \"no\", \"payload\": \"/no\"}]\n }\n }\n formatted = online.format_bot_output(message)\n assert formatted == (\"Hello!\\n\"\n \"Image: http://example.com/myimage.png\\n\"\n \"Attachment: My Attachment\\n\"\n \"1: yes (/yes)\\n\"\n \"2: no (/no)\")\n\n\ndef test_latest_user_message():\n tracker_dump = \"data/test_trackers/tracker_moodbot.json\"\n tracker_json = json.loads(utils.read_file(tracker_dump))\n\n m = online.latest_user_message(tracker_json.get(\"events\"))\n\n assert m is not None\n assert m[\"event\"] == \"user\"\n assert m[\"text\"] == \"/mood_great\"\n\n\ndef test_latest_user_message_on_no_events():\n m = online.latest_user_message([])\n\n assert m is None\n\n\ndef test_all_events_before_user_msg():\n tracker_dump = \"data/test_trackers/tracker_moodbot.json\"\n tracker_json = json.loads(utils.read_file(tracker_dump))\n evts = tracker_json.get(\"events\")\n\n m = online.all_events_before_latest_user_msg(evts)\n\n assert m is not None\n assert m == evts[:4]\n\n\ndef test_all_events_before_user_msg_on_no_events():\n assert online.all_events_before_latest_user_msg([]) == []\n\n\ndef test_print_history(mock_endpoint):\n tracker_dump = utils.read_file(\n \"data/test_trackers/tracker_moodbot.json\")\n\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/tracker'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.GET, url, body=tracker_dump)\n\n httpretty.enable()\n online._print_history(sender_id, mock_endpoint)\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n assert b == \"\"\n assert (httpretty.latest_requests[-1].path ==\n \"/conversations/{}/tracker?include_events=AFTER_RESTART\"\n \"\".format(sender_id))\n\n\ndef test_is_listening_for_messages(mock_endpoint):\n tracker_dump = utils.read_file(\n \"data/test_trackers/tracker_moodbot.json\")\n\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/tracker'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.GET, url, body=tracker_dump)\n\n httpretty.enable()\n is_listening = online.is_listening_for_message(sender_id, 
mock_endpoint)\n httpretty.disable()\n\n assert is_listening\n\n\ndef test_splitting_conversation_at_restarts():\n tracker_dump = \"data/test_trackers/tracker_moodbot.json\"\n evts = json.loads(utils.read_file(tracker_dump)).get(\"events\")\n evts_wo_restarts = evts[:]\n evts.insert(2, {\"event\": \"restart\"})\n evts.append({\"event\": \"restart\"})\n\n split = online._split_conversation_at_restarts(evts)\n assert len(split) == 2\n assert [e for s in split for e in s] == evts_wo_restarts\n assert len(split[0]) == 2\n\n\ndef test_as_md_message():\n parse_data = {\n \"text\": \"Hello there rasa.\",\n \"entities\": [{\"start\": 12,\n \"end\": 16,\n \"entity\": \"name\",\n \"value\": \"rasa\"}],\n \"intent\": {\"name\": \"greeting\", \"confidence\": 0.9}\n }\n md = online._as_md_message(parse_data)\n assert md == \"Hello there [rasa](name).\"\n\n\ndef test_validate_user_message():\n parse_data = {\n \"text\": \"Hello there rasa.\",\n \"parse_data\": {\n \"entities\": [{\"start\": 12,\n \"end\": 16,\n \"entity\": \"name\",\n \"value\": \"rasa\"}],\n \"intent\": {\"name\": \"greeting\", \"confidence\": 0.9}\n }\n }\n assert online._validate_user_regex(parse_data, [\"greeting\", \"goodbye\"])\n assert not online._validate_user_regex(parse_data, [\"goodbye\"])\n\n\ndef test_undo_latest_msg(mock_endpoint):\n tracker_dump = utils.read_file(\n \"data/test_trackers/tracker_moodbot.json\")\n tracker_json = json.loads(tracker_dump)\n evts = tracker_json.get(\"events\")\n\n sender_id = uuid.uuid4().hex\n\n url = '{}/conversations/{}/tracker'.format(\n mock_endpoint.url, sender_id)\n replace_url = '{}/conversations/{}/tracker/events'.format(\n mock_endpoint.url, sender_id)\n httpretty.register_uri(httpretty.GET, url, body=tracker_dump)\n httpretty.register_uri(httpretty.PUT, replace_url)\n\n httpretty.enable()\n online._undo_latest(sender_id, mock_endpoint)\n httpretty.disable()\n\n b = httpretty.latest_requests[-1].body.decode(\"utf-8\")\n\n # this should be the events the online call sends to the endpoint\n # these events should have the last utterance omitted\n replaced_evts = json.loads(b)\n assert len(replaced_evts) == 6\n assert replaced_evts == evts[:6]\n", "sub_path": "tests/test_online.py", "file_name": "test_online.py", "file_ext": "py", "file_size_in_byte": 6136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "rasa_core.utils.EndpointConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 21, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 21, "usage_type": "name"}, {"api_name": "httpretty.httpretty.POST", "line_number": 21, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 23, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 23, "usage_type": "name"}, {"api_name": "rasa_core.training.online.send_message", "line_number": 24, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 24, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 25, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 25, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"httpretty.httpretty", "line_number": 27, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 36, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 40, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 40, "usage_type": "name"}, {"api_name": "httpretty.httpretty.POST", "line_number": 40, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 42, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 42, "usage_type": "name"}, {"api_name": "rasa_core.training.online.request_prediction", "line_number": 43, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 43, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 44, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 44, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 46, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 46, "usage_type": "name"}, {"api_name": "rasa_core.training.online.format_bot_output", "line_number": 61, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 61, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "rasa_core.utils.read_file", "line_number": 71, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 71, "usage_type": "name"}, {"api_name": "rasa_core.training.online.latest_user_message", "line_number": 73, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 73, "usage_type": "name"}, {"api_name": "rasa_core.training.online.latest_user_message", "line_number": 81, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 81, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 88, "usage_type": "call"}, {"api_name": "rasa_core.utils.read_file", "line_number": 88, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 88, "usage_type": "name"}, {"api_name": "rasa_core.training.online.all_events_before_latest_user_msg", "line_number": 91, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 91, "usage_type": "name"}, {"api_name": "rasa_core.training.online.all_events_before_latest_user_msg", "line_number": 98, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 98, "usage_type": "name"}, {"api_name": "rasa_core.utils.read_file", "line_number": 102, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 102, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 105, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 109, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 109, "usage_type": "name"}, {"api_name": "httpretty.httpretty.GET", "line_number": 109, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 111, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 111, "usage_type": "name"}, {"api_name": "rasa_core.training.online._print_history", "line_number": 112, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 112, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 113, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 113, 
"usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 115, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 115, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 117, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 117, "usage_type": "name"}, {"api_name": "rasa_core.utils.read_file", "line_number": 123, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 123, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 126, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 130, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 130, "usage_type": "name"}, {"api_name": "httpretty.httpretty.GET", "line_number": 130, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 132, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 132, "usage_type": "name"}, {"api_name": "rasa_core.training.online.is_listening_for_message", "line_number": 133, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 133, "usage_type": "name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 134, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 134, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 141, "usage_type": "call"}, {"api_name": "rasa_core.utils.read_file", "line_number": 141, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 141, "usage_type": "name"}, {"api_name": "rasa_core.training.online._split_conversation_at_restarts", "line_number": 146, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 146, "usage_type": "name"}, {"api_name": "rasa_core.training.online._as_md_message", "line_number": 162, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 162, "usage_type": "name"}, {"api_name": "rasa_core.training.online._validate_user_regex", "line_number": 177, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 177, "usage_type": "name"}, {"api_name": "rasa_core.training.online._validate_user_regex", "line_number": 178, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 178, "usage_type": "name"}, {"api_name": "rasa_core.utils.read_file", "line_number": 182, "usage_type": "call"}, {"api_name": "rasa_core.utils", "line_number": 182, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 184, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 187, "usage_type": "call"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 193, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 193, "usage_type": "name"}, {"api_name": "httpretty.httpretty.GET", "line_number": 193, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.register_uri", "line_number": 194, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 194, "usage_type": "name"}, {"api_name": "httpretty.httpretty.PUT", "line_number": 194, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty.enable", "line_number": 196, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 196, "usage_type": "name"}, {"api_name": "rasa_core.training.online._undo_latest", "line_number": 197, "usage_type": "call"}, {"api_name": "rasa_core.training.online", "line_number": 197, "usage_type": 
"name"}, {"api_name": "httpretty.httpretty.disable", "line_number": 198, "usage_type": "call"}, {"api_name": "httpretty.httpretty", "line_number": 198, "usage_type": "name"}, {"api_name": "httpretty.httpretty.latest_requests", "line_number": 200, "usage_type": "attribute"}, {"api_name": "httpretty.httpretty", "line_number": 200, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 204, "usage_type": "call"}]}
+{"seq_id": "5665522", "text": "import nltk\nfrom datetime import datetime\nfrom nltk_data.stop_words_data.stop_word_processing import get_stop_words\nfrom string import whitespace\nfrom collections import Counter\nfrom langdetect import detect\n\n\ndef avg(a, b):\n return a / b if b != 0 else 0\n\n\nclass SimpleMetricsCallback(object):\n sent_detector = nltk.tokenize.punkt.PunktSentenceTokenizer()\n\n def timedelta(self, creation_time):\n \"\"\" return days between the article publication\n and the dataset acquisition.\"\"\"\n creation = datetime.strptime(creation_time[:19], '%Y-%m-%d %H:%M:%S')\n now = datetime.utcnow()\n delta = now - creation\n return delta.days\n\n @staticmethod\n def n_symbols(text, ignore_spaces=False):\n if ignore_spaces:\n return len([c for c in text if c not in whitespace])\n else:\n return len(text)\n\n @staticmethod\n def n_syllables(words):\n count = 0\n vowels = 'aeiouy'\n\n for word in words:\n if word[0] in vowels:\n count += 1\n for i in range(1, len(word)):\n if word[i] in vowels and word[i-1] not in vowels:\n count += 1\n if word.endswith('e'):\n count -= 1\n\n return count\n\n def n_sentences(self, text):\n return len(self.sent_detector.tokenize(text.strip()))\n\n @staticmethod\n def most_common_words(words, count=5):\n words = Counter(words)\n most_common = words.most_common(count)\n if most_common:\n return ', '.join('\"{}\": {}'.format(k, v) for k, v in most_common)\n else:\n return '-'\n\n def __call__(self, text):\n if text == \"\":\n return (\n ('n_symbols', 0),\n ('n_symbols_no_space', 0),\n ('n_syllables', 0),\n ('n_sentences', 0),\n ('n_tokens_content', 0),\n ('n_unique_tokens', 0),\n ('n_non_stop_words', 0),\n ('n_non_stop_unique_tokens', 0),\n ('average_sentence_length', 0),\n ('average_token_length', 0),\n ('average_token_length_syllables', 0),\n ('most_common_non_stop_words', 0),\n )\n\n try:\n text_lang = detect(text)\n except Exception as e:\n text_lang = 'en'\n\n n_symbols = self.n_symbols(text)\n n_symbols_no_space = self.n_symbols(text, ignore_spaces=True)\n n_sentences = self.n_sentences(text)\n words = [w for w in nltk.tokenize.word_tokenize(text) if w.isalpha()]\n\n if text_lang == 'de':\n self.stop_words = get_stop_words('de')\n else:\n # english stopwords by default\n self.stop_words = get_stop_words('en')\n\n non_stop_words = [word for word in words if word not in self.stop_words]\n n_syllables = self.n_syllables(words)\n\n return (\n ('n_symbols', n_symbols),\n ('n_symbols_no_space', n_symbols_no_space),\n ('n_syllables', n_syllables),\n ('n_sentences', n_sentences),\n ('n_tokens_content', len(words)),\n ('n_unique_tokens', len(set(words))),\n ('n_non_stop_words', len(non_stop_words)),\n ('n_non_stop_unique_tokens', len(set(non_stop_words))),\n ('average_sentence_length', avg(len(words), n_sentences)),\n ('average_token_length', avg(sum([len(word) for word in words]), len(words))),\n ('average_token_length_syllables', avg(n_syllables, len(words))),\n ('most_common_non_stop_words', self.most_common_words(non_stop_words)),\n )\n", "sub_path": "parameters_extractor/metrics/simple.py", "file_name": "simple.py", "file_ext": "py", "file_size_in_byte": 3669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "nltk.tokenize.punkt.PunktSentenceTokenizer", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 19, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "string.whitespace", "line_number": 27, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 52, "usage_type": "call"}, {"api_name": "langdetect.detect", "line_number": 77, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 84, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 84, "usage_type": "attribute"}, {"api_name": "nltk_data.stop_words_data.stop_word_processing.get_stop_words", "line_number": 87, "usage_type": "call"}, {"api_name": "nltk_data.stop_words_data.stop_word_processing.get_stop_words", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "297982540", "text": "\"\"\"\nThe `OWER Directory` contains the input files required for training the\n`OWER Classifier`. The `OWER Temp Directory` keeps intermediate files\nfor debugging purposes.\n\n**Structure**\n\n::\n\n ower/ # OWER Directory\n\n tmp/ # OWER Temp Directory\n\n ent_labels.txt # OWER Entity Labels TXT\n rel_labels.txt # OWER Relation Labels TXT\n\n classes.tsv # OWER Classes TSV\n\n test.tsv # OWER Test Samples TSV\n train.tsv # OWER Train Samples TSV\n valid.tsv # OWER Valid Samples TSV\n\n|\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nfrom spacy.lang.en import English\nfrom torchtext.data import TabularDataset, Field\nfrom torchtext.vocab import Vocab\n\nfrom dao.base_dir import BaseDir\nfrom dao.ower.classes_tsv import ClassesTsv\nfrom dao.ower.samples_tsv import SamplesTsv\nfrom dao.ower.tmp.tmp_dir import TmpDir\nfrom dao.ryn.split.labels_txt import LabelsTxt\n\n\n@dataclass\nclass Sample:\n ent: int\n classes: List[int]\n sents: List[List[int]]\n\n def __iter__(self):\n return iter((self.ent, self.classes, self.sents))\n\n\nclass OwerDir(BaseDir):\n tmp_dir: TmpDir\n\n ent_labels_txt: LabelsTxt\n rel_labels_txt: LabelsTxt\n\n classes_tsv: ClassesTsv\n\n train_samples_tsv: SamplesTsv\n valid_samples_tsv: SamplesTsv\n test_samples_tsv: SamplesTsv\n\n def __init__(self, path: Path):\n super().__init__(path)\n\n self.tmp_dir = TmpDir(path.joinpath('tmp'))\n\n self.ent_labels_txt = LabelsTxt(path.joinpath('ent_labels.txt'))\n self.rel_labels_txt = LabelsTxt(path.joinpath('rel_labels.txt'))\n\n self.classes_tsv = ClassesTsv(path.joinpath('classes.tsv'))\n\n self.train_samples_tsv = SamplesTsv(path.joinpath('train.tsv'))\n self.valid_samples_tsv = SamplesTsv(path.joinpath('valid.tsv'))\n self.test_samples_tsv = SamplesTsv(path.joinpath('test.tsv'))\n\n def check(self) -> None:\n super().check()\n\n self.tmp_dir.check()\n\n self.ent_labels_txt.check()\n self.rel_labels_txt.check()\n\n self.classes_tsv.check()\n\n self.train_samples_tsv.check()\n self.valid_samples_tsv.check()\n self.test_samples_tsv.check()\n\n def create(self) -> None:\n super().create()\n\n self.tmp_dir.create()\n\n def read_datasets(self, class_count: int, sent_count: int, vectors=None) \\\n -> Tuple[List[Sample], List[Sample], List[Sample], Vocab]:\n \"\"\"\n :param vectors: Pre-trained word embeddings\n \"\"\"\n\n nlp = English()\n spacy_tokenizer = nlp.tokenizer\n\n def tokenize(text: str) -> List[str]:\n return [token.text for token in spacy_tokenizer(text.strip())]\n\n ent_field = Field(sequential=False, use_vocab=False)\n ent_label_field = Field()\n class_field = Field(sequential=False, use_vocab=False)\n sent_field = Field(sequential=True, use_vocab=True, tokenize=tokenize, lower=True)\n\n ent_col = ('ent', ent_field)\n ent_label_col = ('ent_label', ent_label_field)\n class_cols = [(f'class_{i}', class_field) for i in range(class_count)]\n sent_cols = [(f'sent_{i}', sent_field) for i in range(sent_count)]\n\n cols = [ent_col, ent_label_col] + class_cols + sent_cols\n\n train_tab_set = TabularDataset(str(self.train_samples_tsv.path), 'tsv', cols, skip_header=True)\n valid_tab_set = TabularDataset(str(self.valid_samples_tsv.path), 'tsv', cols, skip_header=True)\n test_tab_set = TabularDataset(str(self.test_samples_tsv.path), 'tsv', cols, skip_header=True)\n\n #\n # Build vocab on train data\n #\n\n sent_field.build_vocab(train_tab_set, vectors=vectors)\n vocab = sent_field.vocab\n\n #\n # Transform TabularDataset -> 
List[Sample]\n #\n\n def transform(raw_set: TabularDataset) -> List[Sample]:\n return [Sample(\n int(getattr(row, 'ent')),\n [int(getattr(row, f'class_{i}')) for i in range(class_count)],\n [[vocab[token] for token in getattr(row, f'sent_{i}')] for i in range(sent_count)]\n ) for row in raw_set]\n\n train_set = transform(train_tab_set)\n valid_set = transform(valid_tab_set)\n test_set = transform(test_tab_set)\n\n return train_set, valid_set, test_set, vocab\n", "sub_path": "src/dao/ower/ower_dir.py", "file_name": "ower_dir.py", "file_ext": "py", "file_size_in_byte": 4418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 41, "usage_type": "name"}, {"api_name": "dao.base_dir.BaseDir", "line_number": 51, "usage_type": "name"}, {"api_name": "dao.ower.tmp.tmp_dir.TmpDir", "line_number": 52, "usage_type": "name"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 54, "usage_type": "name"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 55, "usage_type": "name"}, {"api_name": "dao.ower.classes_tsv.ClassesTsv", "line_number": 57, "usage_type": "name"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 59, "usage_type": "name"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 60, "usage_type": "name"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 61, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 63, "usage_type": "name"}, {"api_name": "dao.ower.tmp.tmp_dir.TmpDir", "line_number": 66, "usage_type": "call"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 68, "usage_type": "call"}, {"api_name": "dao.ryn.split.labels_txt.LabelsTxt", "line_number": 69, "usage_type": "call"}, {"api_name": "dao.ower.classes_tsv.ClassesTsv", "line_number": 71, "usage_type": "call"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 73, "usage_type": "call"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 74, "usage_type": "call"}, {"api_name": "dao.ower.samples_tsv.SamplesTsv", "line_number": 75, "usage_type": "call"}, {"api_name": "spacy.lang.en.English", "line_number": 102, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 105, "usage_type": "name"}, {"api_name": "torchtext.data.Field", "line_number": 108, "usage_type": "call"}, {"api_name": "torchtext.data.Field", "line_number": 109, "usage_type": "call"}, {"api_name": "torchtext.data.Field", "line_number": 110, "usage_type": "call"}, {"api_name": "torchtext.data.Field", "line_number": 111, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 120, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 121, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 122, "usage_type": "call"}, {"api_name": "torchtext.data.TabularDataset", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "torchtext.vocab.Vocab", "line_number": 97, "usage_type": "name"}]}
+{"seq_id": "519452795", "text": "\"\"\"\n/**************************************************************\n* Name : definitions.py\n* Author : Tom Sorteberg\n* Created : 12/08/2020\n* Course : CIS 152 Data Structures\n* Version : 1.0\n* OS : Windows 10 Professional 1909\n* Copyright : This is my own original work based on\n* specifications issued by our instructor\n* Description : This class file defines both the Schedule\n and Group wrapper data types for the\n Scheduling Application.\n* Academic Honesty: I attest that this is my original work.\n* I have not used unauthorized source code, either modified or\n* unmodified. I have not given other fellow student(s) access to\n* my program.\n***************************************************************/\n\"\"\"\nfrom constants import constants\nfrom modules.validate import check_ticket\nimport re\nfrom modules.validate import update_csv\nimport csv\nimport ast\nimport os\nimport shutil\n\n\"\"\" Class Schedule\"\"\"\n\n\nclass Schedule(object):\n \"\"\"\n This class represents a queue data structure with\n associated class and static functions. The Schedule\n queue is then populated with Group class nodes.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Default constructor.\n \"\"\"\n self._queue = []\n self._group_number = 1\n\n def insert(self, entries):\n \"\"\"\n Inserts a group object\n :param entries: Required list.\n :return: No return.\n \"\"\"\n\n # Local variable declaration and initialization.\n character_set = set(\"0123456789GSPR \")\n number_set = set(\"0123456789\")\n email_set = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n length = len(entries)\n priority = self.priority(entries)\n inserted = False\n no_email = False\n min_age = False\n max_age = False\n checked = []\n duplicate = False\n\n # Input validation.\n for entry in entries:\n # Validate ticket numbers.\n if len(entry[0]) != constants.TICKET_LEN \\\n or not character_set.issuperset(entry[0]) \\\n or not check_ticket(entry[0]):\n # Raise exception.\n raise ValueError(\"Invalid value for ticket number parameter.\")\n # Validate age.\n elif not number_set.issuperset((entry[1])) or int(entry[1]) < constants.MIN_AGE:\n # Raise exception.\n raise ValueError(\"Invalid value for age parameter.\")\n elif not number_set.issuperset((entry[2])) or int(entry[2]) < constants.MIN_HEIGHT:\n # Raise exception.\n raise ValueError(\"Invalid value for height parameter.\")\n # Validate email.\n elif entry[3] != \"\" and not re.search(email_set, entry[3]):\n # Raise exception.\n raise ValueError(\"Invalid value for email parameter.\")\n # If there are no email addresses, then set no_email variable to True.\n elif entry[3] != \"\":\n no_email = True\n\n # Check for duplicates and age verification.\n for entry in entries:\n if entry[0] is not None and entry[0] in checked:\n duplicate = True\n checked.append(entry[0])\n # If age field is not empty and is an integer.\n if entry[1] != \"\" and number_set.issuperset(entry[1]):\n # If the age field is less than or equal to 7,\n # set min_age iteration variable to true.\n if int(entry[1]) <= constants.MIN_ACCP:\n min_age = True\n # If the age field is greater than than or equal to 14,\n # set max_age iteration variable to true.\n if int(entry[1]) >= constants.MAX_ACCP:\n max_age = True\n\n # If no email address is provided.\n if not no_email:\n # Raise exception.\n raise ValueError(\"No value provided for email.\")\n # If accompany requirements are not met.\n elif min_age is True and max_age is not True:\n raise ValueError(\"Accompany 
requirements not met.\")\n        # If there are duplicates.\n        elif duplicate:\n            # Raise exception.\n            raise ValueError(\"Duplicate ticket exists\")\n\n        # If the group is full or the queue is empty.\n        if length == constants.MAX_GROUP or self.is_empty():\n            # Append group object to queue.\n            self._queue.append(Group(entries, priority, self._group_number))\n            # Increment group number.\n            self._group_number += 1\n            # Set inserted to True.\n            inserted = True\n        # Else if group is less than 4, find a group with same priority\n        # with room for additional members and if available, insert member\n        # information.\n        else:\n            for group in self._queue:\n                if (constants.MAX_GROUP - group.size()) >= length \\\n                        and priority == group.get_priority():\n                    inserted = True\n                    for entry in entries:\n                        group.update(entry)\n                    # Stop at the first group with room so the entries\n                    # are not added to multiple groups.\n                    break\n            # If queue is not empty and no suitable group is found, create\n            # a new group.\n            if not inserted:\n                self._queue.append(Group(entries, priority, self._group_number))\n                # Increment group number.\n                self._group_number += 1\n        # Remove ticket entries from valid.csv to prevent additional\n        # registration.\n        update_csv(entries)\n        # Write entry to backup.csv file in case of recovery.\n        self.backup_csv()\n\n    def remove(self):\n        \"\"\"\n        Function that removes and returns the group at the front of\n        the queue.\n        :return: Returns a Group object.\n        \"\"\"\n        # Return statement.\n        return self._queue.pop(0)\n\n    def size(self):\n        \"\"\"\n        Function that returns the size of the queue.\n        :return: Returns an integer.\n        \"\"\"\n        # Return statement.\n        return len(self._queue)\n\n    def is_empty(self):\n        \"\"\"\n        Function that returns True if the queue is empty.\n        :return: Returns a boolean.\n        \"\"\"\n        # Return statement.\n        return len(self._queue) == 0\n\n    def search(self, value):\n        \"\"\"\n        Function that performs a search based on group number.\n        Returns true if found.\n        :param value: Required integer.\n        :return: Returns a boolean.\n        \"\"\"\n        # Local variable declaration and initialization.\n        return_statement = False\n        # Input Validation.\n        if isinstance(value, int):\n            # For loop to iterate through queue.\n            # If value is found, return True.\n            for group in self._queue:\n                if group.get_group_num() == value:\n                    return_statement = True\n        else:\n            raise ValueError(\"Parameter value must be an integer.\")\n\n        # Return statement.\n        return return_statement\n\n    def display_group(self, value):\n        \"\"\"\n        Function that displays group information based on group number.\n        :param value: Required integer.\n        :return: Returns a string.\n        \"\"\"\n        # Local variable declaration and initialization.\n        return_statement = \"Group not found.\"\n        member_statement = \"\"\n        # Input Validation.\n        if isinstance(value, int):\n            # For loop to search for group value.\n            for group in self._queue:\n                # If group found, return group information.\n                if group.get_group_num() == value:\n                    members = group.get_members()\n                    for member in members:\n                        member_statement = member_statement \\\n                            + \"\\nTicket#: \" + member[0] \\\n                            + \"\\nAge: \" + member[1] \\\n                            + \"\\nHeight: \" + member[2] \\\n                            + \"\\nemail: \" + member[3] + \"\\n\"\n\n                    return_statement = \"Group#: \" + str(group.get_group_num()) + \\\n                        \", Priority: \" + group.get_priority() + \\\n                        \", \\nMembers: \\n\" + member_statement\n            # Return statement.\n            return return_statement\n        else:\n            raise ValueError(\"Parameter value must be an integer.\")\n\n    def import_csv(self):\n        \"\"\"\n        Function that imports data from backup .csv and rebuilds\n        priority queue.\n        :return: No return.\n        \"\"\"\n        # Local variable declaration and initialization.\n        # 
Try except clause to check if file is available.\n try:\n with open('../backup/backup.csv', mode='r', newline=\"\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for line in csv_reader:\n # Convert string representation of list to type list.\n temp_string = line[2]\n temp_list = ast.literal_eval(temp_string)\n self._queue.append(Group(temp_list, line[1], int(line[0])))\n self._group_number += 1\n except FileNotFoundError:\n # Raise exception.\n raise FileNotFoundError(\"Backup.csv file cannot be found.\")\n\n def backup_csv(self):\n \"\"\"\n Function that exports data to a .csv for backup.\n :return: No return.\n \"\"\"\n\n # Overwrite export.csv if it exists.\n with open('../backup/backup.csv', mode='w', newline=\"\") as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n for group in self._queue:\n csv_writer.writerow([group.get_group_num(), group.get_priority(), group.get_members()])\n # Close open object.\n csv_file.close()\n\n def selection_sort(self):\n \"\"\"\n Function that performs a selection sort algorithm on the queue based\n on group priority.\n :return: No return.\n \"\"\"\n\n def swap(min_value, index_value):\n \"\"\"\n Helper function that exchanges queue positions from minimum value\n and index value.\n :param min_value: Required integer.\n :param index_value: Required integer.\n :return: No return.\n \"\"\"\n temp = self._queue[min_value]\n self._queue[min_value] = self._queue[index_value]\n self._queue[index_value] = temp\n\n # Local variable declaration and initialization.\n index = 0\n count = 0\n # While the index is less than the total size of the queue.\n while index < self.size() - 1:\n # Set the minimum index value to index.\n min_index = index\n # Set the probe index to index value plus one.\n probe = index + 1\n # While the probe index is less than the total size of the queue.\n while probe < self.size():\n # If probe index priority is less than minimum index priority.\n if self._queue[probe].get_priority() < self._queue[min_index].get_priority():\n # Set minimum index to probe index.\n min_index = probe\n # Increment counters.\n probe += 1\n count += 1\n # If minimum index value does not equal initial index value.\n if min_index != index:\n # Function call to swap group positions in queue if value is found.\n swap(min_index, index)\n # Increment counters.\n index += 1\n count += 1\n\n def export_csv(self):\n \"\"\"\n Function that exports data to a .csv for backup.\n :return: No return.\n \"\"\"\n # Function call for selection sort.\n self.selection_sort()\n\n # If previous export exists, backup to archive.\n if os.path.exists(\"../export/export.csv\"):\n shutil.copyfile(\"../export/export.csv\", \"../archive/export.csv\")\n # Overwrite export.csv if it exists.\n with open('../export/export.csv', mode='w', newline=\"\") as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n\n for group in self._queue:\n temp_list = group.get_members()\n csv_writer.writerow([\"Group#\", \"Priority\"])\n csv_writer.writerow([group.get_group_num(), group.get_priority(), \"Ticket#\", \"Age\", \"Height\", \"Email\"])\n index = 0\n for _ in temp_list:\n csv_writer.writerow([\"\", \"\", temp_list[index][0], temp_list[index][1], temp_list[index][2], temp_list[index][3]])\n index += 1\n\n # Close open object.\n csv_file.close()\n\n # Copy backup file to archive.\n shutil.copyfile(\"../backup/backup.csv\", \"../archive/backup.csv\")\n\n # Delete backup file.\n os.remove(\"../backup/backup.csv\")\n\n @ staticmethod\n def priority(entries):\n 
\"\"\"\n Static function that determines group priority.\n :param entries: Required list.\n :return: Returns a string.\n \"\"\"\n # Local variable declaration and initialization.\n priority = None\n # Input validation.\n if isinstance(entries, list):\n # For loop and selection logic to determine group priority.\n for entry in entries:\n if entry[0] != \"\" and entry[0][0:2] == \"GP\":\n if priority is None:\n priority = \"A\"\n elif priority >= \"A\":\n priority = \"A\"\n elif entry[0] != \"\" and entry[0][0:2] == \"GS\":\n if priority is None:\n priority = \"B\"\n elif priority >= \"B\":\n priority = \"B\"\n elif entry[0] != \"\" and entry[0][0:2] == \"PR\":\n if priority is None:\n priority = \"C\"\n elif priority >= \"C\":\n priority = \"C\"\n elif entry[0] != \"\" and entry[0][0:2] == \"GR\":\n if priority is None:\n priority = \"D\"\n elif priority >= \"D\":\n priority = \"D\"\n else:\n # Raise exception.\n raise ValueError(\"Parameter must be type list.\")\n\n # Return statement.\n return priority\n\n\n\"\"\" Class Group \"\"\"\n\n\nclass Group(object):\n\n def __init__(self, entries, priority, group_num):\n \"\"\"\n Default constructor.\n :param entries: Required list.\n :param priority: Required String.\n :param group_num: Required integer.\n \"\"\"\n # Input validation.\n if isinstance(entries, list) \\\n and isinstance(priority, str) \\\n and isinstance(group_num, int) \\\n and len(entries) <= 4:\n self._group_num = group_num\n\n # Member variable declaration and initialization.\n self._members = entries\n self._priority = priority\n else:\n raise ValueError(\"Invalid parameter.\")\n\n def size(self):\n \"\"\"\n Function that returns the size of group.\n :return: Returns an integer.\n \"\"\"\n # Return statement.\n return len(self._members)\n\n def update(self, visitor):\n \"\"\"\n Function that appends members to groups.\n :param visitor: Required list.\n :return: No return.\n \"\"\"\n # Input validation.\n if isinstance(visitor, list) and self.size() != constants.MAX_GROUP:\n self._members.append(visitor)\n else:\n raise ValueError(\"Parameter must be of type list.\")\n\n def get_priority(self):\n \"\"\"\n Function that returns group priority.\n :return: Returns a string.\n \"\"\"\n # Return statement.\n return self._priority\n\n def get_members(self):\n \"\"\"\n Function that returns group member information.\n :return: Returns a list.\n \"\"\"\n # Return statement.\n return self._members\n\n def get_group_num(self):\n \"\"\"\n Function that returns the group number.\n :return: Returns an int.\n \"\"\"\n # Return statement.\n return self._group_num\n", "sub_path": "definitions/definitions.py", "file_name": "definitions.py", "file_ext": "py", "file_size_in_byte": 16424, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "constants.constants.TICKET_LEN", "line_number": 69, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 69, "usage_type": "name"}, {"api_name": "modules.validate.check_ticket", "line_number": 71, "usage_type": "call"}, {"api_name": "constants.constants.MIN_AGE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 75, "usage_type": "name"}, {"api_name": "constants.constants.MIN_HEIGHT", "line_number": 78, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 78, "usage_type": "name"}, {"api_name": "re.search", "line_number": 82, "usage_type": "call"}, {"api_name": "constants.constants.MIN_ACCP", 
"line_number": 98, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 98, "usage_type": "name"}, {"api_name": "constants.constants.MAX_ACCP", "line_number": 102, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 102, "usage_type": "name"}, {"api_name": "constants.constants.MAX_GROUP", "line_number": 118, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 118, "usage_type": "name"}, {"api_name": "constants.constants.MAX_GROUP", "line_number": 130, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 130, "usage_type": "name"}, {"api_name": "modules.validate.update_csv", "line_number": 143, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 234, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 238, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 314, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 317, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 332, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 335, "usage_type": "call"}, {"api_name": "constants.constants.MAX_GROUP", "line_number": 418, "usage_type": "attribute"}, {"api_name": "constants.constants", "line_number": 418, "usage_type": "name"}]}
+{"seq_id": "425677528", "text": "#!/usr/bin/env python3\n\nimport sys\nimport pandas as pd\nimport numpy as np\nimport glob\nfrom functools import reduce\n\npath = \"/study/midusref/DATA/Eyetracking/david_analysis/data_processed/[0-9][0-9][0-9]/*_data_type_compiled.csv\"\n\nfiles = sorted(glob.glob(path))\n\nprint (len(files))\n\nappended_data = []\n\n# file_name = 'df_'\n# full_name_list = []\n# file_counter = 1\n\nfor file in files:\n\n\tdata = pd.read_csv(file)\n\n\tdata_df = data[['iaps_number', 'percent_valid']]\n\tdata_df['iaps_number'] = data_df['iaps_number'].astype(str)\n\t#subject_df = file[['subject_number']]\n\tsubject_number = file.split('/')[7]\n\tdata_df = data_df.rename(columns={'percent_valid':subject_number})\n\t\n\t#data_transposed = data_df.T\n\t#print (data_transposed)\n\n\t#print (data_df)\n\n\tappended_data.append(data_df)\n\n\t# file_counter = str(file_counter)\n\t# full_name = (file_name + file_counter)\n\t# full_name_list.append(full_name)\n\t# file_counter = int(file_counter)\n\t# file_counter += 1\n\n# print (len(full_name_list))\n\n# transposed_data = []\n\n# for data in appended_data:\n# \tdata = data.set_index('iaps_number').T\n# \ttransposed_data.append(data)\n\ndf_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10,df_11, df_12, df_13, df_14, df_15, df_16, df_17, df_18, df_19, df_20, df_21, df_22, df_23, df_24, df_25, df_26, df_27, df_28, df_29, df_30, df_31, df_32, df_33, df_34, df_35, df_36, df_37, df_38, df_39, df_40, df_41, df_42, df_43, df_44, df_45, df_46, df_47, df_48, df_49, df_50, df_51, df_52, df_53, df_54, df_55, df_56, df_57, df_58, df_59, df_60, df_61, df_62, df_63, df_64, df_65, df_66, df_67, df_68, df_69, df_70, df_71, df_72, df_73, df_74, df_75, df_76, df_77, df_78, df_79, df_80, df_81, df_82, df_83, df_84, df_85, df_86, df_87, df_88, df_89, df_90, df_91, df_92, df_93, df_94, df_95, df_96, df_97, df_98, df_99, df_100, df_101, df_102, df_103, df_104, df_105, df_106, df_107, df_108, df_109, df_110, df_111, df_112, df_113, df_114, df_115, df_116, df_117, df_118, df_119, df_120, df_121, df_122, df_123, df_124, df_125 = appended_data\n\ndata_list = [df_1, df_2, df_3, df_4, df_5, df_6, df_7, df_8, df_9, df_10,df_11, df_12, df_13, df_14, df_15, df_16, df_17, df_18, df_19, df_20, df_21, df_22, df_23, df_24, df_25, df_26, df_27, df_28, df_29, df_30, df_31, df_32, df_33, df_34, df_35, df_36, df_37, df_38, df_39, df_40, df_41, df_42, df_43, df_44, df_45, df_46, df_47, df_48, df_49, df_50, df_51, df_52, df_53, df_54, df_55, df_56, df_57, df_58, df_59, df_60, df_61, df_62, df_63, df_64, df_65, df_66, df_67, df_68, df_69, df_70, df_71, df_72, df_73, df_74, df_75, df_76, df_77, df_78, df_79, df_80, df_81, df_82, df_83, df_84, df_85, df_86, df_87, df_88, df_89, df_90, df_91, df_92, df_93, df_94, df_95, df_96, df_97, df_98, df_99, df_100, df_101, df_102, df_103, df_104, df_105, df_106, df_107, df_108, df_109, df_110, df_111, df_112, df_113, df_114, df_115, df_116, df_117, df_118, df_119, df_120, df_121, df_122, df_123, df_124, df_125]\n\n\n# cols = list(transposed_data[0].columns)\n# cols.append(\"iaps_number\")\n# print (cols)\n\n\nfrom functools import reduce\n\nfinal_df = reduce(lambda x,y: pd.merge(x,y, on=\"iaps_number\", how='outer'), data_list)\nfinal_df.rename(columns ={'iaps_number':'subject_number'}, inplace=True)\nprint (final_df)\n\ndata_transposed = final_df.T\nprint (data_transposed)\n\ndata_transposed.to_csv(\"/study/midusref/DATA/Eyetracking/david_analysis/QA/validity_by_iaps.csv\", header=False, na_rep='NA')\n\n\n", "sub_path": 
"4_compute_stimuli_validity.py", "file_name": "4_compute_stimuli_validity.py", "file_ext": "py", "file_size_in_byte": 3399, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "glob.glob", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "457601554", "text": "\r\n\r\nfrom jira import JIRA\r\n#import json\r\n#from dateutil import parser\r\n#import datetime\r\nimport ccilib as cci\r\n#import getpass\r\n#import sys\r\n#import gspread\r\n#from oauth2client.service_account import ServiceAccountCredentials\r\nimport re\r\n\r\n\r\nclass failed_content:\r\n \r\n def __init__(self, title, cci_file):\r\n self.title = title;\r\n self.cc = cci.cci(cci_file);\r\n self.items = self.cc.get_unique_cars_cgi_all()\r\n self.cla_number = self.cc.get_cla_count();\r\n self.failed_items = set();\r\n self.failed_cqc_items =set();\r\n self.failed_cqa_items = set();\r\n self.issues = set()\r\n self.activities = set(self.cc.get_cla_numbers(self.items));\r\n self.extra_failed_cla = set()\r\n\r\n \r\n def failed_cla(self):\r\n return set(self.cc.get_cla_numbers(self.failed_items));\r\n \r\n def all_failed_cla(self):\r\n return self.failed_cla() | self.extra_failed_cla\r\n \r\n def failed_cqc_cla(self):\r\n return set(self.cc.get_cla_numbers(self.failed_cqc_items));\r\n \r\n def failed_cqa_cla(self):\r\n return set(self.cc.get_cla_numbers(self.failed_cqa_items));\r\n \r\n def cqc_items_failure_rate(self):\r\n fr = 'N/A'\r\n if self.cla_number !=0:\r\n fr = len(self.failed_cqc_items)/self.cla_number\r\n return fr;\r\n \r\n def cqa_items_failure_rate(self):\r\n fr = 'N/A'\r\n if self.cla_number !=0:\r\n fr = len(self.failed_cqa_items)/self.cla_number\r\n return fr;\r\n \r\n def items_failure_rate(self):\r\n fr = 'N/A'\r\n if self.cla_number !=0:\r\n fr = len(self.failed_items)/self.cla_number\r\n return fr;\r\n \r\ndef issue_has_cgi(issue, item):\r\n des = issue.fields.description;\r\n ex = item + \"[^>]*<\"\r\n findings = re.findall(ex, des)\r\n return len(findings)\r\n\r\n\r\ndef item_failed(CGI):\r\n \r\n failed = 0;\r\n jac = JIRA('https://jira.cengage.com');\r\n query ='project = MTQA AND text ~ ' + CGI;\r\n issues = jac.search_issues(query);\r\n for issue in issues:\r\n if str(issue.fields.status) in ('Open','In Progress', 'Reopened') or str(issue.fields.resolution)=='Fixed' :\r\n failed = 1;\r\n return failed;\r\n\r\ndef find_all_issues(query):\r\n# query ='project = MTQA AND issuetype = Bug AND labels = back_half AND labels in (WLCQC)';\r\n jac = JIRA('https://jira.cengage.com');\r\n bunch = 50;\r\n issues = [];\r\n while bunch == 50:\r\n print('1')\r\n iss = jac.search_issues(query, startAt = len(issues) , maxResults = 50);\r\n bunch = len(list(iss))\r\n issues = issues + list(iss);\r\n print('2')\r\n return issues;\r\n\r\nif __name__ == \"__main__\":\r\n \r\n \r\n titles = {'Conectados':[1]}\r\n \r\n query12 = 'project = MTQA AND issuetype = Bug AND labels = WL_2020 AND labels in (WLCQC, WLCQA) AND resolution in (Unresolved, Fixed) and component in (Content) and priority in (\"High/Critical\", \"Blocker/Showstopper\") AND labels = Conectados and bucket = \"Phase 3\"'\r\n query34 = 'project = MTQA AND issuetype = Bug AND labels = WL_2020 AND labels in (WLCQC, WLCQA) AND resolution in (Unresolved) and component in (Content) and priority in (\"Medium/Major\", \"Low/Minor\")'\r\n \r\n issues = find_all_issues(query12)\r\n print('issues = ', len(issues), '\\n');\r\n\r\n for key in titles: \r\n title = key;\r\n cci_file ='C:\\\\Users\\\\gyesayan\\\\CARS\\\\CCI\\\\' + title + '_CCI.csv';\r\n rut = failed_content(title, cci_file)\r\n \r\n for item in rut.items:\r\n for issue in issues:\r\n\r\n if issue_has_cgi(issue, item) and title in issue.fields.labels:\r\n# print(item, issue.key, issue_has_cgi(issue, item))\r\n if \"WLCQA\" in 
issue.fields.labels:\r\n rut.failed_cqa_items.add(item);\r\n if \"WLCQC\" in issue.fields.labels:\r\n rut.failed_cqc_items.add(item);\r\n rut.failed_items.add(item);\r\n rut.issues.add(issue.key);\r\n \r\n for cla in rut.activities:\r\n for issue in issues:\r\n if (cla in issue.raw['fields'][\"description\"] or cla in issue.fields.summary) and title in issue.fields.labels:\r\n# print(cla, issue)\r\n rut.extra_failed_cla.add(cla);\r\n rut.issues.add(issue.key);\r\n\r\n print(title)\r\n\r\n items = len(set(rut.items))\r\n failed_items = len(set(rut.failed_items))\r\n print(\"Overall unique items: \",items)\r\n print(\"failed items: \",failed_items)\r\n print(\"item failure rate: \",failed_items/items)\r\n print(\"failed CLA: \",len(rut.all_failed_cla()))\r\n\r\n\r\n\r\n \r\n", "sub_path": "failure_rate2020.py", "file_name": "failure_rate2020.py", "file_ext": "py", "file_size_in_byte": 4724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "ccilib.cci", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 63, "usage_type": "call"}, {"api_name": "jira.JIRA", "line_number": 70, "usage_type": "call"}, {"api_name": "jira.JIRA", "line_number": 80, "usage_type": "call"}]}
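find_all_issues above pages through search_issues in blocks of 50 until a short page comes back. The same idea as a reusable generator; only the startAt/maxResults keywords already shown in the script are assumed, and the wiring in the trailing comment is hypothetical:

def iter_paged(fetch, page_size=50):
    # fetch(start, size) must return a list-like page of results.
    start = 0
    while True:
        page = list(fetch(start, page_size))
        for item in page:
            yield item
        if len(page) < page_size:
            break
        start += page_size

# issues = list(iter_paged(lambda s, n: jac.search_issues(query, startAt=s, maxResults=n)))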
+{"seq_id": "278736257", "text": "import pandas as pd\nimport sys\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\ndef get_stats(stat='BaseSpirit'):\n data = pd.read_csv('data/recipes.csv')\n data.fillna('nothing', inplace=True)\n # desired_cols = ['Glass','Occasions','Flavor','BaseSpirit','CocktailType','Preparation','Served','Strength','Difficulty','Hours','Theme','Brands','Garnish']\n stats = defaultdict(int)\n \n for i, row in data.iterrows():\n for col in data.columns:\n if col == stat:\n names = row[col].split(\";\")\n for name in names:\n if name not in stats:\n stats[name] = 1\n else:\n stats[name] += 1\n\t \n for i in sorted(stats, key = stats.get, reverse = True):\n print(i + \": \" + str(stats[i]))\n print(\"Missing:\" + str(stats['nothing']))\n \n plt.barh(*zip(*stats.items()))\n plt.show()\n\t\t\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n get_stats(sys.argv[1])\n else:\n get_stats()", "sub_path": "stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "587507092", "text": "from flask import json\nimport requests\nfrom requests.api import request\n\nURL = \"http://127.0.0.1:5000\"\nBACKUP_URL = \"https://retro-video-store-api.herokuapp.com\"\n\n\n'''\n =============================================\n HELPER PRINTS\n =============================================\n'''\n\ndef bar_break():\n print(\"\\n==========================\\n\")\n\ndef list_options_ee():\n options = {\n \"1\" : \"Add Video to Store Stock\",\n \"2\" : \"Edit Video Info\",\n \"3\" : \"Remove Video From Inventory\",\n \"4\" : \"View Current Store Stock\",\n \"5\" : \"View Video Info\",\n \"6\" : \"Add New Customer\",\n \"7\" : \"Edit Existing Customer\",\n \"8\" : \"Delete Existing Customer\",\n \"9\" : \"View Existing Customer Records\",\n \"10\" : \"View All Existing Customers\",\n \"11\" : \"Check Out\",\n \"12\" : \"Check In\"\n }\n\n bar_break()\n print(\"Here are your available options:\\n\")\n for choice in options:\n print(f\"Option {choice}. {options[choice]}\")\n\n bar_break()\n\n return options\n\ndef list_options_cust():\n options = {\n\n }\n pass\n\n'''\n =============================================\n EMPLOYEE OPTION FUNCTIONS\n =============================================\n'''\n\ndef add_video():\n print(\"Enter video info below:\")\n request_body = {}\n request_body[\"title\"] = input(\"Title: \")\n request_body[\"release_date\"] = input(\"Release date: \")\n request_body[\"total_inventory\"] = input(\"Total inventory: \")\n\n response = requests.post(URL +\"/videos\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef edit_video():\n print(\"Enter updated video info below:\")\n request_body = {}\n video_id = input(\"Video ID: \")\n request_body[\"title\"] = input(\"Title: \")\n request_body[\"release_date\"] = input(\"Release date: \")\n request_body[\"total_inventory\"] = input(\"Total inventory: \")\n\n response = requests.put(URL +\"/videos/\" +video_id, json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef remove_video():\n print(\"DELETE VIDEO - THIS ACTION CANNOT BE UNDONE\")\n if input(\"Are you sure? 
Y/N \") != \"Y\":\n print(\"ACTION CANCELLED\")\n return\n \n video_id = input(\"Video ID: \")\n response = requests.delete(URL +\"/videos/\" +video_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef view_video_stock():\n print(\"All Videos in Store Stock:\")\n response = requests.get(URL +\"/videos\")\n print(json.dumps(response.json(), indent=2))\n return\n\ndef view_single_video():\n print(\"Video Info Request:\")\n video_id = input(\"Video ID: \")\n response = requests.get(URL +\"/videos/\" +video_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef add_customer():\n print(\"Enter customer info below:\")\n request_body = {}\n request_body[\"name\"] = input(\"Name: \")\n request_body[\"phone\"] = input(\"Phone number: \")\n request_body[\"postal_code\"] = input(\"Postal code: \")\n\n response = requests.post(URL +\"/customers\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef edit_customer():\n print(\"Enter updated customer info below:\")\n request_body = {}\n customer_id = input(\"Customer ID: \")\n request_body[\"name\"] = input(\"Name: \")\n request_body[\"phone\"] = input(\"Phone number: \")\n request_body[\"postal_code\"] = input(\"Postal code: \")\n \n response = requests.put(URL +\"/customers/\" +customer_id, json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef delete_customer():\n print(\"DELETE CUSTOMER - THIS ACTION CANNOT BE UNDONE\")\n if input(\"Are you sure? Y/N \") != \"Y\":\n print(\"ACTION CANCELLED\")\n return\n \n customer_id = input(\"Customer ID: \")\n response = requests.delete(URL +\"/customers/\" +customer_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef view_customer():\n print(\"Customer Info Request:\")\n customer_id = input(\"Customer ID: \")\n response = requests.get(URL +\"/customers/\" +customer_id)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef view_all_customers():\n print(\"All Active Customer Accounts:\")\n response = requests.get(URL +\"/customers\")\n print(json.dumps(response.json(), indent=2))\n return\n\ndef checking_out():\n print(\"Check Out a Video:\")\n request_body = {}\n request_body[\"customer_id\"] = int(input(\"Customer ID: \"))\n request_body[\"video_id\"] = int(input(\"Video ID: \"))\n\n response = requests.post(URL +\"/rentals/check-out\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\ndef checking_in():\n print(\"Check In a Video:\")\n request_body = {}\n request_body[\"customer_id\"] = int(input(\"Customer ID: \"))\n request_body[\"video_id\"] = int(input(\"Video ID: \"))\n\n response = requests.post(URL +\"/rentals/check-in\", json=request_body)\n print(json.dumps(response.json(), indent=1))\n return\n\n'''\n =============================================\n CUSTOMER OPTION FUNCTIONS\n =============================================\n'''\n\ndef find_videos_by():\n print(\"I'm sorry, that feature is not yet available in your area\")\n return\n\ndef check_current_rentals():\n print(\"I'm sorry, that feature is not yet available in your area\")\n return\n\n'''\n =============================================\n MAIN\n =============================================\n'''\n\ndef main(in_use=True, is_employee=False):\n print(\"WELCOME TO RETRO VIDEO STORE\")\n\n ee_id = input(\"Employee? Please enter your 4 digit id. 
Hit Enter to continue as a customer.\\n\")\n    if len(ee_id) == 4 and ee_id.isdigit():\n        print(f\"Welcome to work, Employee {ee_id}\")\n        is_employee = True\n        list_options_ee()\n\n    while is_employee and in_use:\n        func_call_dict = {\n            \"1\" : add_video,\n            \"2\" : edit_video,\n            \"3\" : remove_video,\n            \"4\" : view_video_stock,\n            \"5\" : view_single_video,\n            \"6\" : add_customer,\n            \"7\" : edit_customer,\n            \"8\" : delete_customer,\n            \"9\" : view_customer,\n            \"10\" : view_all_customers,\n            \"11\" : checking_out,\n            \"12\" : checking_in\n        }\n\n        choice = None\n        while choice not in func_call_dict:\n            choice = input(\"What would you like to do? Q to quit.\\n\")\n\n            if choice == \"Q\" or choice == 'q':\n                print(f\"Goodbye Retro Video Store Employee {ee_id}!\")\n                bar_break()\n                return\n\n        func_call_dict[choice]()\n        bar_break()\n\n    while in_use:\n        func_call_dict = {\n            \"1\" : find_videos_by,\n            \"2\" : check_current_rentals\n        }\n\n        choice = None\n        while choice not in func_call_dict:\n            choice = input(\"What would you like to do? Q to quit.\\n\")\n\n            if choice == \"Q\" or choice == 'q':\n                print(\"Goodbye, Retro Video Store customer!\")\n                bar_break()\n                return\n\n        # Dispatch the chosen customer option, mirroring the employee loop.\n        func_call_dict[choice]()\n        bar_break()\n\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "requests.post", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 63, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 75, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 86, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 92, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 99, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 110, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 122, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 133, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 140, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.json", 
"line_number": 146, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 156, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 166, "usage_type": "name"}]}
+{"seq_id": "558839874", "text": "import unittest\nimport mock\n\nfrom StringIO import StringIO\n\nfrom superlance.compat import xmlrpclib\nfrom superlance.oome_monitor import OomeMonitor, OomeProcess\nfrom superlance.tests.dummy import (DummyRPCServer,\n DummySupervisorRPCNamespace)\n\nclass TestOomeProcess(unittest.TestCase):\n \"\"\"\n Test class to test OomeProcess methods and properties\n \"\"\"\n @mock.patch('sys.stderr', new_callable=StringIO)\n def setUp(self, mock_stderr):\n \"\"\"\n Setup function to initialise tests\n \"\"\"\n self.stderr = mock_stderr\n process_object = DummySupervisorRPCNamespace.all_process_info[0]\n self.process = OomeProcess(process_object, oome_file='oome_file')\n \n def test_init(self):\n \"\"\"\n Tests if OomeProcess could be created\n \"\"\"\n self.assertTrue(isinstance(self.process, OomeProcess))\n \n def test_env_vars(self):\n \"\"\"\n Tests getting the env_vars property. Dummy env var resembles real\n environ file inside /proc//\n \"\"\"\n dummy_env_var = (\"SUPERVISOR_GROUP_NAME=test_server\\x00SUPERVISOR_PROC\"\n \"ESS_NAME=test_server\\x00HOMEDIR=homedir\\x00SUPERVISOR_ENABLED=1\"\n \"\\x00SUPERVISOR_SERVER_URL=unix:///tmp/supervisor.sock\\x00OOME_FIL\"\n \"E=oome_file\")\n expected = {'OOME_FILE': 'oome_file', 'HOMEDIR': 'homedir'}\n with mock.patch('superlance.oome_monitor.open',\n mock.mock_open(read_data=dummy_env_var), create=True) as m:\n self.assertEqual(sorted(expected.items()),\n sorted(self.process.env_vars.items()))\n \n def test_get_oome_file_oome_file_init(self):\n \"\"\"\n Tests getting the oome_file name property if it was set during init\n \"\"\"\n self.assertEqual('oome_file', self.process.oome_file)\n \n def test_get_oome_file_oome_file_env(self):\n \"\"\"\n Tests getting the oome_file name property if $OOME_FILE is in env vars\n \"\"\"\n self.process._oome_file = None\n self.process._env_vars = {'OOME_FILE': 'oome_file_environ'}\n self.assertEqual('oome_file_environ', self.process.oome_file)\n \n def test_get_oome_file_homedir_env(self):\n \"\"\"\n Tests getting the oome_file name property if $HOMEDIR is in env vars\n \"\"\"\n self.process._oome_file = None\n self.process._env_vars = {'HOMEDIR': 'homedir'}\n self.assertEqual('homedir/work/oome', self.process.oome_file)\n \n @mock.patch('superlance.oome_monitor.os.readlink')\n def test_get_oome_file_cwd(self, mock_readlink):\n \"\"\"\n Tests getting the oome_file name property if no env variables were set\n \"\"\"\n mock_readlink.return_value = 'cwd'\n self.process._oome_file = None\n self.process._env_vars = {'USELESS_VAR': '3.141599'}\n self.assertEqual('cwd/work/oome', self.process.oome_file)\n \n def test_set_oome_file(self):\n \"\"\"\n Tests setting oome_file property\n \"\"\"\n self.process.oome_file = 'real_oome_file'\n self.assertEqual('real_oome_file', self.process.oome_file)\n \n\n @mock.patch('superlance.oome_monitor.os.path.isfile',\n return_value=True)\n def test_check_oome_file_exists(self, mock_os_path):\n \"\"\"\n Tests checking oome_file existence\n \"\"\"\n self.assertTrue(self.process.check_oome_file())\n \n @mock.patch('superlance.oome_monitor.os.path.isfile',\n return_value=False)\n def test_check_oome_file_does_not_exist(self, mock_os_path):\n \"\"\"\n Tests checking oome_file non existence\n \"\"\"\n self.assertFalse(self.process.check_oome_file())\n \n @mock.patch('superlance.oome_monitor.os.remove', return_value=True)\n def test_oome_file_delete(self, mock_os_remove):\n \"\"\"\n Tests deleting the oome file\n \"\"\"\n self.process.delete_oome_file()\n 
self.assertEqual(\"oome file oome_file was deleted\\n\",\n self.stderr.getvalue())\n \n @mock.patch('superlance.oome_monitor.os.remove',\n side_effect=OSError('file'))\n def test_oome_file_delete(self, mock_os_remove):\n \"\"\"\n Tests deleting the oome file failure\n \"\"\"\n self.process.delete_oome_file()\n self.assertEqual(\"oome file could not be removed: file\\n\",\n self.stderr.getvalue())\n \n\nclass TestOomeMonitor(unittest.TestCase):\n \"\"\"\n Test class to test OomeMonitor methods and properties\n \"\"\"\n @mock.patch('superlance.oome_monitor.ExternalService')\n @mock.patch('sys.stdin', new_callable=StringIO)\n @mock.patch('sys.stdout', new_callable=StringIO)\n @mock.patch('sys.stderr', new_callable=StringIO)\n def setUp(self, mock_stderr, mock_stdout, mock_stdin, mock_ext_service):\n \"\"\"\n Setup function to initialise tests\n \"\"\"\n rpc = DummyRPCServer()\n process_name = ['foo']\n self.stderr = mock_stderr\n self.stdout = mock_stdout\n self.stdin = mock_stdin\n self.oome_monitor_all = OomeMonitor(rpc, all=True)\n self.oome_monitor_single = OomeMonitor(rpc, process_name=process_name)\n dummy_supervisor = DummySupervisorRPCNamespace()\n self.oome_monitor_all.rpc.supervisor = dummy_supervisor\n ext_service = mock_ext_service('some script')\n self.oome_monitor_single_ext_svc = OomeMonitor(rpc,\n process_name=process_name, ext_service=ext_service)\n \n def test_init(self):\n \"\"\"\n Tests OomeMonitor object creation\n \"\"\"\n self.assertTrue(isinstance(self.oome_monitor_all, OomeMonitor))\n self.assertTrue(isinstance(self.oome_monitor_single, OomeMonitor))\n \n def test_generate_processes(self):\n \"\"\"\n Tests OomeMonitor _generate_processes method\n \"\"\"\n self.assertEqual(len(self.oome_monitor_all.processes),\n len(DummySupervisorRPCNamespace.all_process_info))\n self.assertEqual(len(self.oome_monitor_single.processes), 1)\n \n def test_write_stderr(self):\n \"\"\"\n Tests write_stderr\n \"\"\"\n self.oome_monitor_all.write_stderr('some message')\n self.assertEqual('some message\\n',\n self.stderr.getvalue())\n\n def test_procs(self):\n \"\"\"\n Tests OomeMonitor.procs property\n \"\"\"\n self.assertEqual(self.oome_monitor_all.procs,\n DummySupervisorRPCNamespace.all_process_info)\n # It should match to \"foo\" process defined in Dummy\n self.assertEqual(self.oome_monitor_single.procs,\n DummySupervisorRPCNamespace.all_process_info[:1])\n\n def test_restart(self):\n \"\"\"\n Tests OomeMonitor.restart method\n \"\"\"\n self.oome_monitor_all.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.oome_monitor_single.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.oome_monitor_single_ext_svc.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.assertEqual('foo restarted\\nfoo restarted\\nfoo restarted\\n',\n self.stderr.getvalue())\n \n def test_failed_restart(self):\n \"\"\"\n Tests OomeMonitor.restart method failure\n \"\"\"\n self.oome_monitor_all.rpc.supervisor.stopProcess = mock.MagicMock(\n side_effect=xmlrpclib.Fault('stop', 'error'))\n self.oome_monitor_all.rpc.supervisor.startProcess = mock.MagicMock(\n side_effect=xmlrpclib.Fault('start', 'error'))\n self.oome_monitor_all.restart(\n DummySupervisorRPCNamespace.all_process_info[0])\n self.assertEqual(\"Failed to stop process foo: \\n\"\n \"Failed to start process foo: \\n\",\n self.stderr.getvalue())\n \n @mock.patch('superlance.oome_monitor.os.readlink')\n def test_run(self, mock_readlink):\n \"\"\"\n Functional test for run() all method with one of the processes 
(bar) having\n        an oome file. OomeMonitor will try to delete the mocked oome file\n        and restart the process (using dummy rpc.supervisor)\n        \"\"\"\n        self.stdin.write('eventname:TICK len:0\\n')\n        self.stdin.seek(0)\n        # returning that the process has an oome file\n        self.oome_monitor_all.processes[1].check_oome_file = mock.MagicMock()\n        # mocking the actual file delete\n        self.oome_monitor_all.processes[1].delete_oome_file = mock.MagicMock()\n        with mock.patch('superlance.oome_monitor.open',\n            mock.mock_open(read_data='test'), create=True) as m:\n            self.oome_monitor_all.run(test=True)\n        self.assertEqual(\"bar restarted\\n\", self.stderr.getvalue())\n    \n    @mock.patch('superlance.oome_monitor.os.readlink')\n    def test_run_single(self, mock_readlink):\n        \"\"\"\n        Functional test for run() single method with the process (foo) having\n        an oome file. OomeMonitor will try to delete the mocked oome file\n        and restart the process (using dummy rpc.supervisor)\n        \"\"\"\n        self.stdin.write('eventname:TICK len:0\\n')\n        self.stdin.seek(0)\n        # returning that the process has an oome file\n        self.oome_monitor_single.processes[0].check_oome_file = \\\n            mock.MagicMock()\n        # mocking the actual file delete\n        self.oome_monitor_single.processes[0].delete_oome_file = \\\n            mock.MagicMock()\n        with mock.patch('superlance.oome_monitor.open',\n            mock.mock_open(read_data='test'), create=True) as m:\n            self.oome_monitor_single.run(test=True)\n        self.assertEqual(\"foo restarted\\n\", self.stderr.getvalue())\n    \n    @mock.patch('superlance.oome_monitor.os.readlink')\n    def test_dry_run(self, mock_readlink):\n        \"\"\"\n        Functional test for run() method with one of the processes (bar) having\n        an oome file. OomeMonitor will not try to delete the mocked oome file\n        or restart the process due to dry run\n        \"\"\"\n        self.stdin.write('eventname:TICK len:0\\n')\n        self.stdin.seek(0)\n        self.oome_monitor_all.dry = True\n        # returning that the process has an oome file\n        self.oome_monitor_all.processes[1].check_oome_file = mock.MagicMock()\n        # mocking the actual file delete\n        self.oome_monitor_all.processes[1].delete_oome_file = mock.MagicMock()\n        with mock.patch('superlance.oome_monitor.open',\n            mock.mock_open(read_data='test'), create=True) as m:\n            self.oome_monitor_all.run(test=True)\n        self.assertEqual(\"oome file is detected for bar, not restarting due to\"\n                         \" dry-run\\n\", self.stderr.getvalue())\n    \nif __name__ == '__main__':\n    unittest.main()", "sub_path": "superlance/tests/oome_monitor_test.py", "file_name": "oome_monitor_test.py", "file_ext": "py", "file_size_in_byte": 10851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 21, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 21, "usage_type": "name"}, {"api_name": "superlance.oome_monitor.OomeProcess", "line_number": 22, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 15, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 15, "usage_type": "name"}, {"api_name": "superlance.oome_monitor.OomeProcess", "line_number": 28, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 40, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 41, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 67, "usage_type": "call"}, 
{"api_name": "mock.patch", "line_number": 85, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 93, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 101, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 110, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 121, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummyRPCServer", "line_number": 133, "usage_type": "call"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 138, "usage_type": "call"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 139, "usage_type": "call"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 140, "usage_type": "call"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 143, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 125, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 126, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 126, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 127, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 127, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 128, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 128, "usage_type": "name"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 150, "usage_type": "argument"}, {"api_name": "superlance.oome_monitor.OomeMonitor", "line_number": 151, "usage_type": "argument"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 158, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 158, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 174, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 174, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 177, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 177, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 184, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 184, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 186, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 186, "usage_type": "name"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 188, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 188, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 196, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib.Fault", "line_number": 197, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib", "line_number": 197, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 198, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib.Fault", "line_number": 199, "usage_type": "call"}, {"api_name": "superlance.compat.xmlrpclib", "line_number": 199, "usage_type": "name"}, {"api_name": 
"superlance.tests.dummy.DummySupervisorRPCNamespace.all_process_info", "line_number": 201, "usage_type": "attribute"}, {"api_name": "superlance.tests.dummy.DummySupervisorRPCNamespace", "line_number": 201, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 216, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 218, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 219, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 220, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 206, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 235, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 238, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 239, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 240, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 224, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 255, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 257, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 258, "usage_type": "call"}, {"api_name": "mock.mock_open", "line_number": 259, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 244, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 265, "usage_type": "call"}]}
+{"seq_id": "170910237", "text": "from sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nimport pandas as pd\nfrom sklearn.utils.testing import all_estimators\n\n# scikit-learn 0.20.3 에서 31개\n# scikit-learn 0.21.2 에서 40개중 4개만 돔.\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\niris_data = pd.read_csv(\"./keras/ml/Data/iris2.csv\", encoding= 'utf-8' )\n\n# 붓꽃 데이이터 레이블과 입력 데이터로 분리하기\ny = iris_data.loc[:, \"Name\"]\nx = iris_data.loc[:,[ \"SepalLength\",\"SepalWidth\",\"PetalLength\",\"PetalWidth\"]]\n\n# 학습 전용과 테스트 전용 분리하기\nwarnings.filterwarnings('ignore')\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n# 그리드 서치에서 사용 할 매개 변수 -- (*1)\n\nparameters = [\n {\"n_estimators\": [1,10,100,1000], \"min_samples_split\":[2,3,4,5]},\n {\"n_estimators\": [1,10,100,1000], \"min_samples_split\":[2,3,4,5], \"min_samples_leaf\":[1,2,3,4]},\n {\"n_estimators\": [1,10,100,1000], \"min_samples_split\":[2,3,4,5], \"bootstrap\": [\"True\", \"False\"]}\n]\n\n# 그리드 서치 --- (*2)\nkfold_cv = KFold(n_splits= 5, shuffle=True)\nmodel = GridSearchCV( RandomForestClassifier(), parameters, cv=kfold_cv)\nmodel.fit(x_train, y_train)\nprint(\"/n-------------------\")\nprint(\" 최적의 매개 변수 = \", model.best_estimator_)\n\n# 최적의 매개 변수로 평가하기 ---(*3)\ny_pred = model.predict(x_test)\nprint(\"/n-------------------\")\nprint(\"최종 정답률 = \", accuracy_score(y_test, y_pred))", "sub_path": "ml/m10_gridSearch2_rf.py", "file_name": "m10_gridSearch2_rf.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 45, "usage_type": "call"}]}
+{"seq_id": "295743145", "text": "import tests.aswwu.behaviors.elections.election.election_subtests as election_subtests\nimport tests.aswwu.behaviors.elections.position.position_requests as position_requests\nimport tests.aswwu.behaviors.elections.vote.vote_requests as vote_requests\nimport tests.aswwu.behaviors.elections.vote.vote_subtests as vote_subtests\nimport tests.aswwu.behaviors.auth.auth_subtests as auth_subtests\nimport tests.aswwu.data.paths as paths\nimport tests.utils as utils\nimport json\nfrom tests.conftest import testing_server\n\nPOSITION_DATA = {\n 'position': 'Senator',\n 'election_type': 'aswwu',\n 'active': 'True',\n 'order': 1\n}\n\n\ndef test_post_vote(testing_server):\n admin_session = election_subtests.create_elections_admin()\n election_id = election_subtests.assert_post_dynamic_election(admin_session)['id']\n position_resp = position_requests.post_position(admin_session, POSITION_DATA['position'],\n POSITION_DATA['election_type'],\n POSITION_DATA['active'], POSITION_DATA['order'])\n position_id = json.loads(position_resp.text)['id']\n vote_subtests.create_votes(admin_session, election_id, position_id)\n\n\ndef test_post_vote_candidates(testing_server):\n pass\n\n\ndef test_get_vote(testing_server):\n admin_session = election_subtests.create_elections_admin()\n election_id = election_subtests.assert_post_dynamic_election(admin_session)['id']\n position_resp = position_requests.post_position(admin_session, POSITION_DATA['position'],\n POSITION_DATA['election_type'],\n POSITION_DATA['active'], POSITION_DATA['order'])\n position_id = json.loads(position_resp.text)['id']\n vote_data = vote_subtests.create_votes(admin_session, election_id, position_id)\n users = utils.load_csv(paths.USERS_PATH)\n\n for count, user in enumerate(users):\n user_session = auth_subtests.assert_verify_login(user)[1]\n resp = vote_requests.get_vote(user_session, position_id, user['username'])\n assert (resp.status_code == 200)\n resp_text = json.loads(resp.text)['votes']\n for vote in resp_text:\n vote_subtests.assert_vote_data(vote, vote_data[user['username']])\n", "sub_path": "tests/aswwu/behaviors/elections/vote/test_vote.py", "file_name": "test_vote.py", "file_ext": "py", "file_size_in_byte": 2317, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.create_elections_admin", "line_number": 20, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 20, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.assert_post_dynamic_election", "line_number": 21, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 21, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests.post_position", "line_number": 22, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests", "line_number": 22, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests.create_votes", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.create_elections_admin", "line_number": 34, "usage_type": 
"call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 34, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests.assert_post_dynamic_election", "line_number": 35, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.election.election_subtests", "line_number": 35, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests.post_position", "line_number": 36, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.position.position_requests", "line_number": 36, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests.create_votes", "line_number": 40, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests", "line_number": 40, "usage_type": "name"}, {"api_name": "tests.utils.load_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "tests.utils", "line_number": 41, "usage_type": "name"}, {"api_name": "tests.aswwu.data.paths.USERS_PATH", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tests.aswwu.data.paths", "line_number": 41, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.auth.auth_subtests.assert_verify_login", "line_number": 44, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.auth.auth_subtests", "line_number": 44, "usage_type": "name"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_requests.get_vote", "line_number": 45, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_requests", "line_number": 45, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests.assert_vote_data", "line_number": 49, "usage_type": "call"}, {"api_name": "tests.aswwu.behaviors.elections.vote.vote_subtests", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "385379372", "text": "import enum\n\n\nclass Action(enum.Enum):\n new = 0 # process have not arrive at CPU\n ready = 1 # ready to use CPU\n burst = 2 # actively using CPU\n block = 3 # I/O time\n ternimated = 4 # process terminates\n \n enter_CPU = 5\n leave_CPU = 6\n preempted = 7\n\n\nclass Process:\n def __init__(self, name: str):\n self.name = name\n self.arrival_time = 0 # process arrival time, in MILLISECONDS\n\n self.burst_time = [] # CPU burst time in MS\n self.block_time = [] # I/O block time in MS\n self.index = 0\n self.remain = 0\n\n # process current status\n self.action = Action.new\n # time of the process finish current status in MILLISECONDS. If process\n # enters CPU at x ms, and takes y ms CPU burst, action_leave will be\n # x + y\n self.action_enter = 0\n self.action_leave = 0\n\n self.wait_time = 0\n self.preempt_count = 0\n self.switch_count = 0\n self.tau = 0\n\n # use setattr(object, name, value) to add attribute with your needs\n\n\n\"\"\"\nLinear congruential generator, generate random numbers\nAlgorithm is inherited from POSIX\n\"\"\"\n\n\nclass LCG:\n def __init__(self):\n self.seed = 0\n\n # initialize seed, detail implementation see man srand48\n def srand48(self, seedval: int):\n self.seed = ((seedval & 0xFFFFFFFF) << 16) | 0x330E\n\n # get random number, detail implementation see man drand48\n def drand48(self) -> float:\n self.seed = (0x5DEECE66D * self.seed + 0xB) & 0xffffffffffff\n return float(self.seed / 0x1000000000000)", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "attribute"}]}
+{"seq_id": "578582790", "text": "from operator import itemgetter\nimport os\nimport tkinter as tk\nimport ttk\nimport model\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import func\n\nsession = sessionmaker(bind=model.db)\nsession = session()\n\nroot = tk.Tk()\nroot.title(\"Rapport\")\n\ncontainer = ttk.Frame(root)\ncontainer.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))\ninfo_message = tk.StringVar()\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\n\ndef aantalreis():\n \"\"\"\n Hier wordt een rapport gemaakt van het aantal reizen per reiziger en die wordt in een tesktbestand opgeslagen.\n \"\"\"\n file = open(\"_aantal reizen per ovchipkaart.txt\", 'w')\n ovgebruikers = session.query(model.Reis, func.count(model.Reis.ov_id)).group_by(model.Reis.ov_id)\n ovgebruikers = sorted(ovgebruikers, key=itemgetter(1), reverse=True)\n for ovgebruiker in ovgebruikers:\n ovgebruiker_name = session.query(model.Reis).filter_by(ov_id=ovgebruiker[0].ov_id).first().ov_id\n file.write(\"OV-chipkaart: {ovgebruiker} Totaal aantal reizen: {aantal_reizen}\\n\".format(ovgebruiker=ovgebruiker_name, aantal_reizen=ovgebruiker[1]))\n file.close()\n info_message.set(\"Het bestand is opgeslagen op:\\n{dir}\\\\_aantal reizen per ovchipkaart.txt\".format(dir=current_dir))\n\ndef populairbesteming():\n \"\"\"\n Hier wordt een rapport gemaakt van de populairste bestemmingen en die wordt in een tesktbestand opgeslagen.\n \"\"\"\n file = open(\"_populairste-bestemmingen.txt\", 'w')\n stations = session.query(model.Reis, func.count(model.Reis.eindstation_id)).group_by(model.Reis.eindstation_id).all()\n stations = sorted(stations, key=itemgetter(1), reverse=True)\n for station in stations:\n station_name = session.query(model.Station).filter_by(station_id=station[0].eindstation_id).first().station_naam\n file.write(\"Station {station} is {aantal_bezocht} keer de bestemming geweest van een OV-gebruiker.\\n\".format(station=station_name, aantal_bezocht=station[1]))\n file.close()\n info_message.set(\"Het bestand is opgeslagen op:\\n{dir}\\\\_populairste-bestemmingen.txt\".format(dir=current_dir))\n\n\ndef populairvertrek():\n \"\"\"\n Hier wordt een rapport gemaakt van de populairste vertrekstations en die wordt in een tesktbestand opgeslagen.\n \"\"\"\n file = open(\"_populairste-vertrekstations.txt\", 'w')\n stations = session.query(model.Reis, func.count(model.Reis.beginstation_id)).group_by(model.Reis.beginstation_id).all()\n stations = sorted(stations, key=itemgetter(1), reverse=True)\n for station in stations:\n station_name = session.query(model.Station).filter_by(station_id=station[0].beginstation_id).first().station_naam\n file.write(\"Vanaf station {station} is {aantal_bezocht} keer een OV-gebruiker vertrokken.\\n\".format(station=station_name, aantal_bezocht=station[1]))\n file.close()\n info_message.set(\"Het bestand is opgeslagen op:\\n{dir}\\\\_populairste-vertrekstations.txt\".format(dir=current_dir))\n\n\ntk.Label(container, text=\"Raport -Kies een van de opties\", anchor=\"center\", font=\"-size 10 -weight bold\").grid(column=0, row=0, columnspan=3, sticky=(tk.W, tk.E))\ntk.Label(container, textvariable=info_message, wraplength=500).grid(column=0, columnspan=3, row=1, sticky=(tk.W, tk.E))\ntk.Button(container, text=\"De populairste bestemmingen\", command=populairbesteming).grid(column=0, row=2, sticky=tk.W)\ntk.Button(container, text=\"De populairste vertrekstations\", command=populairvertrek).grid(column=1, row=2, sticky=tk.W)\ntk.Button(container, text=\"Het aantal reizen 
per ovchipkaart\", command=aantalreis).grid(column=2, row=2, sticky=tk.W)\n\n\nroot.mainloop()\n", "sub_path": "report.py", "file_name": "report.py", "file_ext": "py", "file_size_in_byte": 3627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "57", "api": [{"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 9, "usage_type": "call"}, {"api_name": "model.db", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 12, "usage_type": "call"}, {"api_name": "ttk.Frame", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.N", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.S", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 18, "usage_type": "call"}, {"api_name": "model.Reis", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.count", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 25, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 26, "usage_type": "call"}, {"api_name": "model.Reis", "line_number": 28, "usage_type": "attribute"}, {"api_name": "model.Reis", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.count", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 38, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 39, "usage_type": "call"}, {"api_name": "model.Station", "line_number": 41, "usage_type": "attribute"}, {"api_name": "model.Reis", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.count", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 52, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 53, "usage_type": "call"}, {"api_name": "model.Station", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 61, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 65, "usage_type": "attribute"}]}
+{"seq_id": "16446545", "text": "''' This module provides functions for embedding Bokeh plots in various\ndifferent ways.\n\nThere are a number of different combinations of options when embedding\nBokeh plots. The data for the plot can be contained in the document,\nor on a Bokeh server, or in a sidecar JavaScript file. Likewise, BokehJS\nmay be inlined in the document, or loaded from CDN or a Bokeh server.\n\nThe functions in ``bokeh.embed`` provide functionality to embed in all\nthese different cases.\n\n'''\n\nimport uuid\n\nfrom .protocol import serialize_json\nfrom .resources import Resources\nfrom .templates import (\n AUTOLOAD, AUTOLOAD_SERVER, AUTOLOAD_STATIC, FILE,\n NOTEBOOK_DIV, PLOT_DIV, PLOT_JS, PLOT_SCRIPT, RESOURCES\n)\nfrom .utils import encode_utf8\n\ndef components(plot_object, resources):\n ''' Return HTML components to embed a Bokeh plot.\n\n The data for the plot is stored directly in the returned HTML.\n\n .. note:: The returned components assume that BokehJS resources\n are **already loaded**.\n\n Args:\n plot_object (PlotObject) : Bokeh object to render\n typically a Plot or PlotContext\n resources (Resources, optional) : BokehJS resources config\n\n Returns:\n (script, div) : UTF-8 encoded\n\n '''\n ref = plot_object.ref\n elementid = str(uuid.uuid4())\n\n js = PLOT_JS.render(\n elementid = elementid,\n modelid = ref[\"id\"],\n modeltype = ref[\"type\"],\n all_models = serialize_json(plot_object.dump()),\n )\n script = PLOT_SCRIPT.render(\n plot_js = resources.js_wrapper(js),\n )\n div = PLOT_DIV.render(elementid=elementid)\n\n return encode_utf8(script), encode_utf8(div)\n\n\ndef notebook_div(plot_object):\n ''' Return HTML for a div that will display a Bokeh plot in an\n IPython Notebook\n\n The data for the plot is stored directly in the returned HTML.\n\n Args:\n plot_object (PlotObject) : Bokeh object to render\n typically a Plot or PlotContext\n\n Returns:\n div : UTF-8 encoded HTML text\n\n .. 
note:: Assumes ``bokeh.load_notebook()`` or the equivalent has\n already been executed.\n\n '''\n ref = plot_object.ref\n resources = Resources()\n elementid = str(uuid.uuid4())\n\n js = PLOT_JS.render(\n elementid = elementid,\n modelid = ref[\"id\"],\n modeltype = ref[\"type\"],\n all_models = serialize_json(plot_object.dump()),\n )\n script = PLOT_SCRIPT.render(\n plot_js = resources.js_wrapper(js),\n )\n div = PLOT_DIV.render(elementid=elementid)\n html = NOTEBOOK_DIV.render(\n plot_script = script,\n plot_div = div,\n )\n return encode_utf8(html)\n\n\ndef file_html(plot_object, resources, title, template=FILE):\n ''' Return an HTML document that embeds a Bokeh plot.\n\n The data for the plot is stored directly in the returned HTML.\n\n Args:\n plot_object (PlotObject) : Bokeh object to render\n typically a Plot or PlotContext\n resources (Resources) : a resource configuration for BokehJS assets\n title (str) : a title for the HTML document ```` tags\n template (Template, optional) : HTML document template (default: FILE)\n A Jinja2 Template, see bokeh.templates.FILE for the required\n template parameters\n\n Returns:\n html : standalone HTML document with embedded plot\n\n '''\n plot_resources = RESOURCES.render(\n js_raw = resources.js_raw,\n css_raw = resources.css_raw,\n js_files = resources.js_files,\n css_files = resources.css_files,\n )\n script, div = components(plot_object, resources)\n html = template.render(\n title = title,\n plot_resources = plot_resources,\n plot_script = script,\n plot_div = div,\n )\n return encode_utf8(html)\n\n\ndef autoload_static(plot_object, resources, script_path):\n ''' Return JavaScript code and a script tag that can be used to embed\n Bokeh Plots.\n\n The data for the plot is stored directly in the returned JavaScript code.\n\n Args:\n plot_object (PlotObject) :\n resources (Resources) :\n script_path (str) :\n\n Returns:\n (js, tag) :\n JavaScript code to be saved at ``script_path`` and a ``